/* net/sched/sch_atm.c - ATM VC selection "queueing discipline" */

/* Written 1998-2000 by Werner Almesberger, EPFL ICA */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/atmdev.h>
#include <linux/atmclip.h>
#include <linux/rtnetlink.h>
#include <linux/file.h>		/* for fput */
#include <net/netlink.h>
#include <net/pkt_sched.h>

/*
 * The ATM queuing discipline provides a framework for invoking classifiers
 * (aka "filters"), which in turn select classes of this queuing discipline.
 * Each class maps the flow(s) it is handling to a given VC. Multiple classes
 * may share the same VC.
 *
 * When creating a class, VCs are specified by passing the number of the open
 * socket descriptor by which the calling process references the VC. The kernel
 * keeps the VC open at least until all classes using it are removed.
 *
 * In this file, most functions are named atm_tc_* to avoid confusion with all
 * the atm_* in net/atm. This naming convention differs from what's used in the
 * rest of net/sched.
 *
 * Known bugs:
 *  - sometimes messes up the IP stack
 *  - any manipulations besides the few operations described in the README are
 *    untested and likely to crash the system
 *  - should lock the flow while there is data in the queue (?)
 */

#define VCC2FLOW(vcc) ((struct atm_flow_data *) ((vcc)->user_back))

struct atm_flow_data {
	struct Qdisc		*q;		/* FIFO, TBF, etc. */
	struct tcf_proto __rcu	*filter_list;
	struct atm_vcc		*vcc;		/* VCC; NULL if VCC is closed */
	void			(*old_pop)(struct atm_vcc *vcc,
					   struct sk_buff *skb); /* chaining */
	struct atm_qdisc_data	*parent;	/* parent qdisc */
	struct socket		*sock;		/* for closing */
	u32			classid;	/* x:y type ID */
	int			ref;		/* reference count */
	struct gnet_stats_basic_packed	bstats;
	struct gnet_stats_queue	qstats;
	struct list_head	list;
	struct atm_flow_data	*excess;	/* flow for excess traffic;
						   NULL to set CLP instead */
	int			hdr_len;
	unsigned char		hdr[0];		/* header data; MUST BE LAST */
};

struct atm_qdisc_data {
	struct atm_flow_data	link;		/* unclassified skbs go here */
	struct list_head	flows;		/* NB: "link" is also on this
						   list */
	struct tasklet_struct	task;		/* dequeue tasklet */
};

/* ------------------------- Class/flow operations ------------------------- */

static inline struct atm_flow_data *lookup_flow(struct Qdisc *sch, u32 classid)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;

	list_for_each_entry(flow, &p->flows, list) {
		if (flow->classid == classid)
			return flow;
	}
	return NULL;
}

static int atm_tc_graft(struct Qdisc *sch, unsigned long arg,
			struct Qdisc *new, struct Qdisc **old)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)arg;

	pr_debug("atm_tc_graft(sch %p,[qdisc %p],flow %p,new %p,old %p)\n",
		 sch, p, flow, new, old);
	if (list_empty(&flow->list))
		return -EINVAL;
	if (!new)
		new = &noop_qdisc;
	*old = flow->q;
	flow->q = new;
	if (*old)
		qdisc_reset(*old);
	return 0;
}

static struct Qdisc *atm_tc_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct atm_flow_data *flow = (struct atm_flow_data *)cl;

	pr_debug("atm_tc_leaf(sch %p,flow %p)\n", sch, flow);
	return flow ? flow->q : NULL;
}
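
/*
 * Class lifetime is reference counted: every class starts with ref == 1
 * (held for as long as the class exists), atm_tc_get() and
 * atm_tc_bind_filter() take additional references, and atm_tc_put() drops
 * them, destroying the class once the count reaches zero.
 */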
static unsigned long atm_tc_get(struct Qdisc *sch, u32 classid)
{
	struct atm_qdisc_data *p __maybe_unused = qdisc_priv(sch);
	struct atm_flow_data *flow;

	pr_debug("atm_tc_get(sch %p,[qdisc %p],classid %x)\n", sch, p, classid);
	flow = lookup_flow(sch, classid);
	if (flow)
		flow->ref++;
	pr_debug("atm_tc_get: flow %p\n", flow);
	return (unsigned long)flow;
}

static unsigned long atm_tc_bind_filter(struct Qdisc *sch,
					unsigned long parent, u32 classid)
{
	return atm_tc_get(sch, classid);
}

/*
 * atm_tc_put handles all destructions, including the ones that are explicitly
 * requested (atm_tc_destroy, etc.). The assumption here is that we never drop
 * anything that still seems to be in use.
 */
static void atm_tc_put(struct Qdisc *sch, unsigned long cl)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)cl;

	pr_debug("atm_tc_put(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
	if (--flow->ref)
		return;
	pr_debug("atm_tc_put: destroying\n");
	list_del_init(&flow->list);
	pr_debug("atm_tc_put: qdisc %p\n", flow->q);
	qdisc_destroy(flow->q);
	tcf_destroy_chain(&flow->filter_list);
	if (flow->sock) {
		pr_debug("atm_tc_put: f_count %ld\n",
			 file_count(flow->sock->file));
		flow->vcc->pop = flow->old_pop;
		sockfd_put(flow->sock);
	}
	if (flow->excess)
		atm_tc_put(sch, (unsigned long)flow->excess);
	if (flow != &p->link)
		kfree(flow);
	/*
	 * If flow == &p->link, the qdisc no longer works at this point and
	 * needs to be removed. (By the caller of atm_tc_put.)
	 */
}

static void sch_atm_pop(struct atm_vcc *vcc, struct sk_buff *skb)
{
	struct atm_qdisc_data *p = VCC2FLOW(vcc)->parent;

	pr_debug("sch_atm_pop(vcc %p,skb %p,[qdisc %p])\n", vcc, skb, p);
	VCC2FLOW(vcc)->old_pop(vcc, skb);
	tasklet_schedule(&p->task);
}

static const u8 llc_oui_ip[] = {
	0xaa,			/* DSAP: non-ISO */
	0xaa,			/* SSAP: non-ISO */
	0x03,			/* Ctrl: Unnumbered Information Command PDU */
	0x00,			/* OUI: EtherType */
	0x00, 0x00,
	0x08, 0x00
};				/* Ethertype IP (0800) */

static const struct nla_policy atm_policy[TCA_ATM_MAX + 1] = {
	[TCA_ATM_FD]		= { .type = NLA_U32 },
	[TCA_ATM_EXCESS]	= { .type = NLA_U32 },
};
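
/*
 * Class creation (atm_tc_change below) understands the following attributes:
 *
 *   TCA_ATM_FD     - required; file descriptor of an open PF_ATMPVC or
 *                    PF_ATMSVC socket identifying the VC to send on
 *   TCA_ATM_HDR    - optional; raw header prepended to every packet sent on
 *                    the VC (defaults to the LLC/SNAP header for IP,
 *                    llc_oui_ip above)
 *   TCA_ATM_EXCESS - optional; classid of an existing class that handles
 *                    excess traffic; if absent, excess traffic has the CLP
 *                    bit set instead
 */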
static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
			 struct nlattr **tca, unsigned long *arg)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)*arg;
	struct atm_flow_data *excess = NULL;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ATM_MAX + 1];
	struct socket *sock;
	int fd, error, hdr_len;
	void *hdr;

	pr_debug("atm_tc_change(sch %p,[qdisc %p],classid %x,parent %x,"
		 "flow %p,opt %p)\n", sch, p, classid, parent, flow, opt);
	/*
	 * The concept of parents doesn't apply for this qdisc.
	 */
	if (parent && parent != TC_H_ROOT && parent != sch->handle)
		return -EINVAL;
	/*
	 * ATM classes cannot be changed. In order to change properties of the
	 * ATM connection, that socket needs to be modified directly (via the
	 * native ATM API). In order to send a flow to a different VC, the old
	 * class needs to be removed and a new one added. (This may be changed
	 * later.)
	 */
	if (flow)
		return -EBUSY;
	if (opt == NULL)
		return -EINVAL;

	error = nla_parse_nested(tb, TCA_ATM_MAX, opt, atm_policy);
	if (error < 0)
		return error;

	if (!tb[TCA_ATM_FD])
		return -EINVAL;
	fd = nla_get_u32(tb[TCA_ATM_FD]);
	pr_debug("atm_tc_change: fd %d\n", fd);
	if (tb[TCA_ATM_HDR]) {
		hdr_len = nla_len(tb[TCA_ATM_HDR]);
		hdr = nla_data(tb[TCA_ATM_HDR]);
	} else {
		hdr_len = RFC1483LLC_LEN;
		hdr = NULL;	/* default LLC/SNAP for IP */
	}
	if (!tb[TCA_ATM_EXCESS])
		excess = NULL;
	else {
		excess = (struct atm_flow_data *)
			atm_tc_get(sch, nla_get_u32(tb[TCA_ATM_EXCESS]));
		if (!excess)
			return -ENOENT;
	}
	pr_debug("atm_tc_change: type %d, payload %d, hdr_len %d\n",
		 opt->nla_type, nla_len(opt), hdr_len);
	sock = sockfd_lookup(fd, &error);
	if (!sock)
		return error;	/* f_count++ */
	pr_debug("atm_tc_change: f_count %ld\n", file_count(sock->file));
	if (sock->ops->family != PF_ATMSVC && sock->ops->family != PF_ATMPVC) {
		error = -EPROTOTYPE;
		goto err_out;
	}
	/* @@@ should check if the socket is really operational or we'll crash
	   on vcc->send */
	if (classid) {
		if (TC_H_MAJ(classid ^ sch->handle)) {
			pr_debug("atm_tc_change: classid mismatch\n");
			error = -EINVAL;
			goto err_out;
		}
	} else {
		int i;
		unsigned long cl;

		for (i = 1; i < 0x8000; i++) {
			classid = TC_H_MAKE(sch->handle, 0x8000 | i);
			cl = atm_tc_get(sch, classid);
			if (!cl)
				break;
			atm_tc_put(sch, cl);
		}
	}
	pr_debug("atm_tc_change: new id %x\n", classid);
	flow = kzalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL);
	pr_debug("atm_tc_change: flow %p\n", flow);
	if (!flow) {
		error = -ENOBUFS;
		goto err_out;
	}
	RCU_INIT_POINTER(flow->filter_list, NULL);
	flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
	if (!flow->q)
		flow->q = &noop_qdisc;
	pr_debug("atm_tc_change: qdisc %p\n", flow->q);
	flow->sock = sock;
	flow->vcc = ATM_SD(sock);	/* speedup */
	flow->vcc->user_back = flow;
	pr_debug("atm_tc_change: vcc %p\n", flow->vcc);
	flow->old_pop = flow->vcc->pop;
	flow->parent = p;
	flow->vcc->pop = sch_atm_pop;
	flow->classid = classid;
	flow->ref = 1;
	flow->excess = excess;
	list_add(&flow->list, &p->link.list);
	flow->hdr_len = hdr_len;
	if (hdr)
		memcpy(flow->hdr, hdr, hdr_len);
	else
		memcpy(flow->hdr, llc_oui_ip, sizeof(llc_oui_ip));
	*arg = (unsigned long)flow;
	return 0;
err_out:
	if (excess)
		atm_tc_put(sch, (unsigned long)excess);
	sockfd_put(sock);
	return error;
}
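
/*
 * For illustration only (not part of the original file; error handling
 * omitted and the interface/VPI/VCI and QoS values are arbitrary examples):
 * the descriptor passed in TCA_ATM_FD is an ordinary ATM socket that
 * userspace has already opened and connected, roughly along these lines:
 *
 *	int fd = socket(PF_ATMPVC, SOCK_DGRAM, 0);
 *	struct atm_qos qos = { .aal = ATM_AAL5 };
 *	struct sockaddr_atmpvc addr = { .sap_family = AF_ATMPVC };
 *
 *	qos.txtp.traffic_class = ATM_UBR;
 *	qos.txtp.max_sdu = 1524;
 *	qos.rxtp = qos.txtp;
 *	setsockopt(fd, SOL_ATM, SO_ATMQOS, &qos, sizeof(qos));
 *	addr.sap_addr.itf = 0;		(interface number)
 *	addr.sap_addr.vpi = 0;
 *	addr.sap_addr.vci = 42;
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * The fd is then sent in TCA_ATM_FD when the class is created; the kernel
 * holds a reference to the socket (sockfd_lookup above) until the class
 * goes away.
 */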

static int atm_tc_delete(struct Qdisc *sch, unsigned long arg)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)arg;

	pr_debug("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
	if (list_empty(&flow->list))
		return -EINVAL;
	if (rcu_access_pointer(flow->filter_list) || flow == &p->link)
		return -EBUSY;
	/*
	 * Reference count must be 2: one for "keepalive" (set at class
	 * creation), and one for the reference held when calling delete.
	 */
	if (flow->ref < 2) {
		pr_err("atm_tc_delete: flow->ref == %d\n", flow->ref);
		return -EINVAL;
	}
	if (flow->ref > 2)
		return -EBUSY;	/* catch references via excess, etc. */
	atm_tc_put(sch, arg);
	return 0;
}

static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;

	pr_debug("atm_tc_walk(sch %p,[qdisc %p],walker %p)\n", sch, p, walker);
	if (walker->stop)
		return;
	list_for_each_entry(flow, &p->flows, list) {
		if (walker->count >= walker->skip &&
		    walker->fn(sch, (unsigned long)flow, walker) < 0) {
			walker->stop = 1;
			break;
		}
		walker->count++;
	}
}

static struct tcf_proto __rcu **atm_tc_find_tcf(struct Qdisc *sch,
						unsigned long cl)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)cl;

	pr_debug("atm_tc_find_tcf(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
	return flow ? &flow->filter_list : &p->link.filter_list;
}

/* --------------------------- Qdisc operations ---------------------------- */

static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;
	struct tcf_result res;
	int result;
	int ret = NET_XMIT_POLICED;

	pr_debug("atm_tc_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
	result = TC_POLICE_OK;	/* be nice to gcc */
	flow = NULL;
	if (TC_H_MAJ(skb->priority) != sch->handle ||
	    !(flow = (struct atm_flow_data *)atm_tc_get(sch, skb->priority))) {
		struct tcf_proto *fl;

		list_for_each_entry(flow, &p->flows, list) {
			fl = rcu_dereference_bh(flow->filter_list);
			if (fl) {
				result = tc_classify(skb, fl, &res, true);
				if (result < 0)
					continue;
				flow = (struct atm_flow_data *)res.class;
				if (!flow)
					flow = lookup_flow(sch, res.classid);
				goto done;
			}
		}
		flow = NULL;
done:
		;
	}
	if (!flow) {
		flow = &p->link;
	} else {
		if (flow->vcc)
			ATM_SKB(skb)->atm_options = flow->vcc->atm_options;
		/*@@@ looks good ... but it's not supposed to work :-) */
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			kfree_skb(skb);
			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			kfree_skb(skb);
			goto drop;
		case TC_POLICE_RECLASSIFY:
			if (flow->excess)
				flow = flow->excess;
			else
				ATM_SKB(skb)->atm_options |= ATM_ATMOPT_CLP;
			break;
		}
#endif
	}

	ret = qdisc_enqueue(skb, flow->q);
	if (ret != NET_XMIT_SUCCESS) {
drop: __maybe_unused
		if (net_xmit_drop_count(ret)) {
			qdisc_qstats_drop(sch);
			if (flow)
				flow->qstats.drops++;
		}
		return ret;
	}
	/*
	 * Okay, this may seem weird. We pretend we've dropped the packet if
	 * it goes via ATM. The reason for this is that the outer qdisc
	 * expects to be able to q->dequeue the packet later on if we return
	 * success at this place. Also, sch->q.qlen needs to reflect whether
	 * there is a packet eligible for dequeuing or not. Note that the
	 * statistics of the outer qdisc are necessarily wrong because of all
	 * this. There's currently no correct solution for this.
	 */
	if (flow == &p->link) {
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	tasklet_schedule(&p->task);
	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}
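
/*
 * Actual transmission happens from the tasklet below. It is scheduled from
 * atm_tc_enqueue, from atm_tc_dequeue, and from sch_atm_pop once the ATM
 * driver has freed a transmitted skb, so queued packets are pushed out as
 * soon as the VC can accept them again.
 */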

/*
 * Dequeue packets and send them over ATM. Note that we quite deliberately
 * avoid checking net_device's flow control here, simply because sch_atm
 * uses its own channels, which have nothing to do with any CLIP/LANE/or
 * non-ATM interfaces.
 */

static void sch_atm_dequeue(unsigned long data)
{
	struct Qdisc *sch = (struct Qdisc *)data;
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;
	struct sk_buff *skb;

	pr_debug("sch_atm_dequeue(sch %p,[qdisc %p])\n", sch, p);
	list_for_each_entry(flow, &p->flows, list) {
		if (flow == &p->link)
			continue;
		/*
		 * If traffic is properly shaped, this won't generate nasty
		 * little bursts. Otherwise, it may ... (but that's okay)
		 */
		while ((skb = flow->q->ops->peek(flow->q))) {
			if (!atm_may_send(flow->vcc, skb->truesize))
				break;

			skb = qdisc_dequeue_peeked(flow->q);
			if (unlikely(!skb))
				break;

			qdisc_bstats_update(sch, skb);
			bstats_update(&flow->bstats, skb);
			pr_debug("atm_tc_dequeue: sending on class %p\n", flow);
			/* remove any LL header somebody else has attached */
			skb_pull(skb, skb_network_offset(skb));
			if (skb_headroom(skb) < flow->hdr_len) {
				struct sk_buff *new;

				new = skb_realloc_headroom(skb, flow->hdr_len);
				dev_kfree_skb(skb);
				if (!new)
					continue;
				skb = new;
			}
			pr_debug("sch_atm_dequeue: ip %p, data %p\n",
				 skb_network_header(skb), skb->data);
			ATM_SKB(skb)->vcc = flow->vcc;
			memcpy(skb_push(skb, flow->hdr_len), flow->hdr,
			       flow->hdr_len);
			atomic_add(skb->truesize,
				   &sk_atm(flow->vcc)->sk_wmem_alloc);
			/* atm.atm_options are already set by atm_tc_enqueue */
			flow->vcc->send(flow->vcc, skb);
		}
	}
}

static struct sk_buff *atm_tc_dequeue(struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct sk_buff *skb;

	pr_debug("atm_tc_dequeue(sch %p,[qdisc %p])\n", sch, p);
	tasklet_schedule(&p->task);
	skb = qdisc_dequeue_peeked(p->link.q);
	if (skb)
		sch->q.qlen--;
	return skb;
}

static struct sk_buff *atm_tc_peek(struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);

	pr_debug("atm_tc_peek(sch %p,[qdisc %p])\n", sch, p);

	return p->link.q->ops->peek(p->link.q);
}

static unsigned int atm_tc_drop(struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;
	unsigned int len;

	pr_debug("atm_tc_drop(sch %p,[qdisc %p])\n", sch, p);
	list_for_each_entry(flow, &p->flows, list) {
		if (flow->q->ops->drop && (len = flow->q->ops->drop(flow->q)))
			return len;
	}
	return 0;
}

static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);

	pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
	INIT_LIST_HEAD(&p->flows);
	INIT_LIST_HEAD(&p->link.list);
	list_add(&p->link.list, &p->flows);
	p->link.q = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, sch->handle);
	if (!p->link.q)
		p->link.q = &noop_qdisc;
	pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
	RCU_INIT_POINTER(p->link.filter_list, NULL);
	p->link.vcc = NULL;
	p->link.sock = NULL;
	p->link.classid = sch->handle;
	p->link.ref = 1;
	tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch);
	return 0;
}

static void atm_tc_reset(struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;

	pr_debug("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p);
	list_for_each_entry(flow, &p->flows, list)
		qdisc_reset(flow->q);
	sch->q.qlen = 0;
}

static void atm_tc_destroy(struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow, *tmp;

	pr_debug("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p);
	list_for_each_entry(flow, &p->flows, list)
		tcf_destroy_chain(&flow->filter_list);

	list_for_each_entry_safe(flow, tmp, &p->flows, list) {
		if (flow->ref > 1)
			pr_err("atm_destroy: %p->ref = %d\n", flow, flow->ref);
		atm_tc_put(sch, (unsigned long)flow);
	}
	tasklet_kill(&p->task);
}

static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)cl;
	struct nlattr *nest;

	pr_debug("atm_tc_dump_class(sch %p,[qdisc %p],flow %p,skb %p,tcm %p)\n",
		 sch, p, flow, skb, tcm);
	if (list_empty(&flow->list))
		return -EINVAL;
	tcm->tcm_handle = flow->classid;
	tcm->tcm_info = flow->q->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr))
		goto nla_put_failure;
	if (flow->vcc) {
		struct sockaddr_atmpvc pvc;
		int state;

		memset(&pvc, 0, sizeof(pvc));
		pvc.sap_family = AF_ATMPVC;
		pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
		pvc.sap_addr.vpi = flow->vcc->vpi;
		pvc.sap_addr.vci = flow->vcc->vci;
		if (nla_put(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc))
			goto nla_put_failure;
		state = ATM_VF2VS(flow->vcc->flags);
		if (nla_put_u32(skb, TCA_ATM_STATE, state))
			goto nla_put_failure;
	}
	if (flow->excess) {
		if (nla_put_u32(skb, TCA_ATM_EXCESS, flow->classid))
			goto nla_put_failure;
	} else {
		if (nla_put_u32(skb, TCA_ATM_EXCESS, 0))
			goto nla_put_failure;
	}
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int
atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
			struct gnet_dump *d)
{
	struct atm_flow_data *flow = (struct atm_flow_data *)arg;

	if (gnet_stats_copy_basic(d, NULL, &flow->bstats) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &flow->qstats, flow->q->q.qlen) < 0)
		return -1;

	return 0;
}

static int atm_tc_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	return 0;
}

static const struct Qdisc_class_ops atm_class_ops = {
	.graft		= atm_tc_graft,
	.leaf		= atm_tc_leaf,
	.get		= atm_tc_get,
	.put		= atm_tc_put,
	.change		= atm_tc_change,
	.delete		= atm_tc_delete,
	.walk		= atm_tc_walk,
	.tcf_chain	= atm_tc_find_tcf,
	.bind_tcf	= atm_tc_bind_filter,
	.unbind_tcf	= atm_tc_put,
	.dump		= atm_tc_dump_class,
	.dump_stats	= atm_tc_dump_class_stats,
};

static struct Qdisc_ops atm_qdisc_ops __read_mostly = {
	.cl_ops		= &atm_class_ops,
	.id		= "atm",
	.priv_size	= sizeof(struct atm_qdisc_data),
	.enqueue	= atm_tc_enqueue,
	.dequeue	= atm_tc_dequeue,
	.peek		= atm_tc_peek,
	.drop		= atm_tc_drop,
	.init		= atm_tc_init,
	.reset		= atm_tc_reset,
	.destroy	= atm_tc_destroy,
	.dump		= atm_tc_dump,
	.owner		= THIS_MODULE,
};

static int __init atm_init(void)
{
	return register_qdisc(&atm_qdisc_ops);
}

static void __exit atm_exit(void)
{
	unregister_qdisc(&atm_qdisc_ops);
}

module_init(atm_init)
module_exit(atm_exit)
MODULE_LICENSE("GPL");