/linux-4.4.14/drivers/s390/cio/

eadm_sch.c
64 static int eadm_subchannel_start(struct subchannel *sch, struct aob *aob) eadm_subchannel_start() argument
66 union orb *orb = &get_eadm_private(sch)->orb; eadm_subchannel_start()
71 orb->eadm.intparm = (u32)(addr_t)sch; eadm_subchannel_start()
75 EADM_LOG_HEX(6, &sch->schid, sizeof(sch->schid)); eadm_subchannel_start()
77 cc = ssch(sch->schid, orb); eadm_subchannel_start()
80 sch->schib.scsw.eadm.actl |= SCSW_ACTL_START_PEND; eadm_subchannel_start()
91 static int eadm_subchannel_clear(struct subchannel *sch) eadm_subchannel_clear() argument
95 cc = csch(sch->schid); eadm_subchannel_clear()
99 sch->schib.scsw.eadm.actl |= SCSW_ACTL_CLEAR_PEND; eadm_subchannel_clear()
105 struct subchannel *sch = (struct subchannel *) data; eadm_subchannel_timeout() local
107 spin_lock_irq(sch->lock); eadm_subchannel_timeout()
109 EADM_LOG_HEX(1, &sch->schid, sizeof(sch->schid)); eadm_subchannel_timeout()
110 if (eadm_subchannel_clear(sch)) eadm_subchannel_timeout()
112 spin_unlock_irq(sch->lock); eadm_subchannel_timeout()
115 static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires) eadm_subchannel_set_timeout() argument
117 struct eadm_private *private = get_eadm_private(sch); eadm_subchannel_set_timeout()
128 private->timer.data = (unsigned long) sch; eadm_subchannel_set_timeout()
133 static void eadm_subchannel_irq(struct subchannel *sch) eadm_subchannel_irq() argument
135 struct eadm_private *private = get_eadm_private(sch); eadm_subchannel_irq()
136 struct eadm_scsw *scsw = &sch->schib.scsw.eadm; eadm_subchannel_irq()
152 eadm_subchannel_set_timeout(sch, 0); eadm_subchannel_irq()
158 css_sched_sch_todo(sch, SCH_TODO_EVAL); eadm_subchannel_irq()
171 struct subchannel *sch; eadm_get_idle_sch() local
176 sch = private->sch; eadm_get_idle_sch()
177 spin_lock(sch->lock); eadm_get_idle_sch()
181 spin_unlock(sch->lock); eadm_get_idle_sch()
184 return sch; eadm_get_idle_sch()
186 spin_unlock(sch->lock); eadm_get_idle_sch()
196 struct subchannel *sch; eadm_start_aob() local
200 sch = eadm_get_idle_sch(); eadm_start_aob()
201 if (!sch) eadm_start_aob()
204 spin_lock_irqsave(sch->lock, flags); eadm_start_aob()
205 eadm_subchannel_set_timeout(sch, EADM_TIMEOUT); eadm_start_aob()
206 ret = eadm_subchannel_start(sch, aob); eadm_start_aob()
211 eadm_subchannel_set_timeout(sch, 0); eadm_start_aob()
212 private = get_eadm_private(sch); eadm_start_aob()
214 css_sched_sch_todo(sch, SCH_TODO_EVAL); eadm_start_aob()
217 spin_unlock_irqrestore(sch->lock, flags); eadm_start_aob()
223 static int eadm_subchannel_probe(struct subchannel *sch) eadm_subchannel_probe() argument
235 spin_lock_irq(sch->lock); eadm_subchannel_probe()
236 set_eadm_private(sch, private); eadm_subchannel_probe()
238 private->sch = sch; eadm_subchannel_probe()
239 sch->isc = EADM_SCH_ISC; eadm_subchannel_probe()
240 ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch); eadm_subchannel_probe()
242 set_eadm_private(sch, NULL); eadm_subchannel_probe()
243 spin_unlock_irq(sch->lock); eadm_subchannel_probe()
247 spin_unlock_irq(sch->lock); eadm_subchannel_probe()
253 if (dev_get_uevent_suppress(&sch->dev)) { eadm_subchannel_probe()
254 dev_set_uevent_suppress(&sch->dev, 0); eadm_subchannel_probe()
255 kobject_uevent(&sch->dev.kobj, KOBJ_ADD); eadm_subchannel_probe()
261 static void eadm_quiesce(struct subchannel *sch) eadm_quiesce() argument
263 struct eadm_private *private = get_eadm_private(sch); eadm_quiesce()
267 spin_lock_irq(sch->lock); eadm_quiesce()
271 if (eadm_subchannel_clear(sch)) eadm_quiesce()
275 spin_unlock_irq(sch->lock); eadm_quiesce()
279 spin_lock_irq(sch->lock); eadm_quiesce()
283 eadm_subchannel_set_timeout(sch, 0); eadm_quiesce()
285 ret = cio_disable_subchannel(sch); eadm_quiesce()
288 spin_unlock_irq(sch->lock); eadm_quiesce()
291 static int eadm_subchannel_remove(struct subchannel *sch) eadm_subchannel_remove() argument
293 struct eadm_private *private = get_eadm_private(sch); eadm_subchannel_remove()
299 eadm_quiesce(sch); eadm_subchannel_remove()
301 spin_lock_irq(sch->lock); eadm_subchannel_remove()
302 set_eadm_private(sch, NULL); eadm_subchannel_remove()
303 spin_unlock_irq(sch->lock); eadm_subchannel_remove()
310 static void eadm_subchannel_shutdown(struct subchannel *sch) eadm_subchannel_shutdown() argument
312 eadm_quiesce(sch); eadm_subchannel_shutdown()
315 static int eadm_subchannel_freeze(struct subchannel *sch) eadm_subchannel_freeze() argument
317 return cio_disable_subchannel(sch); eadm_subchannel_freeze()
320 static int eadm_subchannel_restore(struct subchannel *sch) eadm_subchannel_restore() argument
322 return cio_enable_subchannel(sch, (u32)(unsigned long)sch); eadm_subchannel_restore()
327 * @sch: subchannel
335 static int eadm_subchannel_sch_event(struct subchannel *sch, int process) eadm_subchannel_sch_event() argument
340 spin_lock_irqsave(sch->lock, flags); eadm_subchannel_sch_event()
341 if (!device_is_registered(&sch->dev)) eadm_subchannel_sch_event()
344 if (work_pending(&sch->todo_work)) eadm_subchannel_sch_event()
347 if (cio_update_schib(sch)) { eadm_subchannel_sch_event()
348 css_sched_sch_todo(sch, SCH_TODO_UNREG); eadm_subchannel_sch_event()
351 private = get_eadm_private(sch); eadm_subchannel_sch_event()
356 spin_unlock_irqrestore(sch->lock, flags); eadm_subchannel_sch_event()
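
The matches above trace eadm_sch.c's start path: the subchannel pointer itself is stored in the ORB's interruption parameter (line 71), so the interrupt handler can map an interrupt straight back to its subchannel, and callers serialize everything under sch->lock. A minimal sketch of that intparm round trip, with condition-code handling borrowed from the convention visible in cio.c (an illustration, not the file's complete logic):

    /* Sketch: start an EADM request and record the pending state. */
    static int eadm_start_sketch(struct subchannel *sch, union orb *orb)
    {
            int cc;

            orb->eadm.intparm = (u32)(addr_t)sch;   /* subchannels live below 2G */
            cc = ssch(sch->schid, orb);
            switch (cc) {
            case 0:         /* request started */
                    sch->schib.scsw.eadm.actl |= SCSW_ACTL_START_PEND;
                    return 0;
            case 1:         /* status pending */
            case 2:         /* busy */
                    return -EBUSY;
            default:        /* not operational */
                    return -ENODEV;
            }
    }
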
cio.c
90 int cio_set_options(struct subchannel *sch, int flags) cio_set_options() argument
92 struct io_subchannel_private *priv = to_io_private(sch); cio_set_options()
101 cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) cio_start_handle_notoper() argument
106 sch->lpm &= ~lpm; cio_start_handle_notoper()
108 sch->lpm = 0; cio_start_handle_notoper()
111 "subchannel 0.%x.%04x!\n", sch->schid.ssid, cio_start_handle_notoper()
112 sch->schid.sch_no); cio_start_handle_notoper()
114 if (cio_update_schib(sch)) cio_start_handle_notoper()
117 sprintf(dbf_text, "no%s", dev_name(&sch->dev)); cio_start_handle_notoper()
119 CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib)); cio_start_handle_notoper()
121 return (sch->lpm ? -EACCES : -ENODEV); cio_start_handle_notoper()
125 cio_start_key (struct subchannel *sch, /* subchannel structure */ cio_start_key() argument
130 struct io_subchannel_private *priv = to_io_private(sch); cio_start_key()
135 CIO_TRACE_EVENT(5, dev_name(&sch->dev)); cio_start_key()
138 /* sch is always under 2G. */ cio_start_key()
139 orb->cmd.intparm = (u32)(addr_t)sch; cio_start_key()
145 orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm; cio_start_key()
154 ccode = ssch(sch->schid, orb); cio_start_key()
164 sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND; cio_start_key()
170 return cio_start_handle_notoper(sch, lpm); cio_start_key()
177 cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm) cio_start() argument
179 return cio_start_key(sch, cpa, lpm, PAGE_DEFAULT_KEY); cio_start()
186 cio_resume (struct subchannel *sch) cio_resume() argument
191 CIO_TRACE_EVENT(4, dev_name(&sch->dev)); cio_resume()
193 ccode = rsch (sch->schid); cio_resume()
199 sch->schib.scsw.cmd.actl |= SCSW_ACTL_RESUME_PEND; cio_resume()
218 cio_halt(struct subchannel *sch) cio_halt() argument
222 if (!sch) cio_halt()
226 CIO_TRACE_EVENT(2, dev_name(&sch->dev)); cio_halt()
231 ccode = hsch (sch->schid); cio_halt()
237 sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND; cio_halt()
251 cio_clear(struct subchannel *sch) cio_clear() argument
255 if (!sch) cio_clear()
259 CIO_TRACE_EVENT(2, dev_name(&sch->dev)); cio_clear()
264 ccode = csch (sch->schid); cio_clear()
270 sch->schib.scsw.cmd.actl |= SCSW_ACTL_CLEAR_PEND; cio_clear()
285 cio_cancel (struct subchannel *sch) cio_cancel() argument
289 if (!sch) cio_cancel()
293 CIO_TRACE_EVENT(2, dev_name(&sch->dev)); cio_cancel()
295 ccode = xsch (sch->schid); cio_cancel()
302 if (cio_update_schib(sch)) cio_cancel()
315 static void cio_apply_config(struct subchannel *sch, struct schib *schib) cio_apply_config() argument
317 schib->pmcw.intparm = sch->config.intparm; cio_apply_config()
318 schib->pmcw.mbi = sch->config.mbi; cio_apply_config()
319 schib->pmcw.isc = sch->config.isc; cio_apply_config()
320 schib->pmcw.ena = sch->config.ena; cio_apply_config()
321 schib->pmcw.mme = sch->config.mme; cio_apply_config()
322 schib->pmcw.mp = sch->config.mp; cio_apply_config()
323 schib->pmcw.csense = sch->config.csense; cio_apply_config()
324 schib->pmcw.mbfc = sch->config.mbfc; cio_apply_config()
325 if (sch->config.mbfc) cio_apply_config()
326 schib->mba = sch->config.mba; cio_apply_config()
329 static int cio_check_config(struct subchannel *sch, struct schib *schib) cio_check_config() argument
331 return (schib->pmcw.intparm == sch->config.intparm) && cio_check_config()
332 (schib->pmcw.mbi == sch->config.mbi) && cio_check_config()
333 (schib->pmcw.isc == sch->config.isc) && cio_check_config()
334 (schib->pmcw.ena == sch->config.ena) && cio_check_config()
335 (schib->pmcw.mme == sch->config.mme) && cio_check_config()
336 (schib->pmcw.mp == sch->config.mp) && cio_check_config()
337 (schib->pmcw.csense == sch->config.csense) && cio_check_config()
338 (schib->pmcw.mbfc == sch->config.mbfc) && cio_check_config()
339 (!sch->config.mbfc || (schib->mba == sch->config.mba)); cio_check_config()
345 int cio_commit_config(struct subchannel *sch) cio_commit_config() argument
351 if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib)) cio_commit_config()
356 cio_apply_config(sch, &schib); cio_commit_config()
357 ccode = msch_err(sch->schid, &schib); cio_commit_config()
362 if (stsch_err(sch->schid, &schib) || cio_commit_config()
365 if (cio_check_config(sch, &schib)) { cio_commit_config()
367 memcpy(&sch->schib, &schib, sizeof(schib)); cio_commit_config()
374 if (tsch(sch->schid, &irb)) cio_commit_config()
390 * @sch: subchannel on which to perform stsch
393 int cio_update_schib(struct subchannel *sch) cio_update_schib() argument
397 if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib)) cio_update_schib()
400 memcpy(&sch->schib, &schib, sizeof(schib)); cio_update_schib()
407 * @sch: subchannel to be enabled
410 int cio_enable_subchannel(struct subchannel *sch, u32 intparm) cio_enable_subchannel() argument
415 CIO_TRACE_EVENT(2, dev_name(&sch->dev)); cio_enable_subchannel()
417 if (sch_is_pseudo_sch(sch)) cio_enable_subchannel()
419 if (cio_update_schib(sch)) cio_enable_subchannel()
422 sch->config.ena = 1; cio_enable_subchannel()
423 sch->config.isc = sch->isc; cio_enable_subchannel()
424 sch->config.intparm = intparm; cio_enable_subchannel()
426 ret = cio_commit_config(sch); cio_enable_subchannel()
432 sch->config.csense = 0; cio_enable_subchannel()
433 ret = cio_commit_config(sch); cio_enable_subchannel()
442 * @sch: subchannel to disable
444 int cio_disable_subchannel(struct subchannel *sch) cio_disable_subchannel() argument
449 CIO_TRACE_EVENT(2, dev_name(&sch->dev)); cio_disable_subchannel()
451 if (sch_is_pseudo_sch(sch)) cio_disable_subchannel()
453 if (cio_update_schib(sch)) cio_disable_subchannel()
456 sch->config.ena = 0; cio_disable_subchannel()
457 ret = cio_commit_config(sch); cio_disable_subchannel()
464 static int cio_check_devno_blacklisted(struct subchannel *sch) cio_check_devno_blacklisted() argument
466 if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) { cio_check_devno_blacklisted()
473 sch->schib.pmcw.dev, sch->schid.ssid); cio_check_devno_blacklisted()
481 * @sch: subchannel structure to be filled out
491 int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid) cio_validate_subchannel() argument
506 ccode = stsch_err(schid, &sch->schib); cio_validate_subchannel()
511 sch->st = sch->schib.pmcw.st; cio_validate_subchannel()
512 sch->schid = schid; cio_validate_subchannel()
514 switch (sch->st) { cio_validate_subchannel()
517 if (!css_sch_is_valid(&sch->schib)) cio_validate_subchannel()
520 err = cio_check_devno_blacklisted(sch); cio_validate_subchannel()
529 sch->schid.ssid, sch->schid.sch_no, sch->st); cio_validate_subchannel()
540 struct subchannel *sch; do_cio_interrupt() local
546 sch = (struct subchannel *)(unsigned long) tpi_info->intparm; do_cio_interrupt()
547 if (!sch) { do_cio_interrupt()
553 spin_lock(sch->lock); do_cio_interrupt()
557 memcpy (&sch->schib.scsw, &irb->scsw, sizeof (irb->scsw)); do_cio_interrupt()
559 if (sch->driver && sch->driver->irq) do_cio_interrupt()
560 sch->driver->irq(sch); do_cio_interrupt()
565 spin_unlock(sch->lock); do_cio_interrupt()
590 void cio_tsch(struct subchannel *sch) cio_tsch() argument
597 if (tsch(sch->schid, irb) != 0) cio_tsch()
600 memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw)); cio_tsch()
608 if (sch->driver && sch->driver->irq) cio_tsch()
609 sch->driver->irq(sch); cio_tsch()
655 struct subchannel *sch; cio_probe_console() local
665 sch = css_alloc_subchannel(schid); cio_probe_console()
666 if (IS_ERR(sch)) cio_probe_console()
667 return sch; cio_probe_console()
669 lockdep_set_class(sch->lock, &console_sch_key); cio_probe_console()
671 sch->config.isc = CONSOLE_ISC; cio_probe_console()
672 sch->config.intparm = (u32)(addr_t)sch; cio_probe_console()
673 ret = cio_commit_config(sch); cio_probe_console()
676 put_device(&sch->dev); cio_probe_console()
679 console_sch = sch; cio_probe_console()
680 return sch; cio_probe_console()
961 * @sch: subchannel on which to perform the start function
969 int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key) cio_tm_start_key() argument
972 union orb *orb = &to_io_private(sch)->orb; cio_tm_start_key()
975 orb->tm.intparm = (u32) (addr_t) sch; cio_tm_start_key()
978 orb->tm.lpm = lpm ? lpm : sch->lpm; cio_tm_start_key()
980 cc = ssch(sch->schid, orb); cio_tm_start_key()
988 return cio_start_handle_notoper(sch, lpm); cio_tm_start_key()
994 * @sch - subchannel on which to perform the interrogate function
999 int cio_tm_intrg(struct subchannel *sch) cio_tm_intrg() argument
1003 if (!to_io_private(sch)->orb.tm.b) cio_tm_intrg()
1005 cc = xsch(sch->schid); cio_tm_intrg()
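
cio_commit_config() (lines 345-374) is the commit-and-verify heart of this file: read the SCHIB with stsch, apply the cached sch->config, issue msch, then re-read and compare. A condensed sketch of that loop, assuming the helpers shown above (the real code also drains pending status with tsch() and distinguishes busy from not-operational):

    /* Sketch: commit sch->config to the hardware and verify it stuck. */
    static int commit_config_sketch(struct subchannel *sch)
    {
            struct schib schib;
            int retry;

            for (retry = 0; retry < 5; retry++) {
                    if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
                            return -ENODEV;
                    cio_apply_config(sch, &schib);          /* desired PMCW bits */
                    if (msch_err(sch->schid, &schib))       /* cc != 0: retry */
                            continue;
                    if (stsch_err(sch->schid, &schib))      /* re-read to verify */
                            return -ENODEV;
                    if (cio_check_config(sch, &schib)) {
                            memcpy(&sch->schib, &schib, sizeof(schib));
                            return 0;
                    }
            }
            return -EAGAIN;
    }
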
css.c
67 struct subchannel *sch = to_subchannel(dev); call_fn_known_sch() local
72 idset_sch_del(cb->set, sch->schid); call_fn_known_sch()
74 rc = cb->fn_known_sch(sch, cb->data); call_fn_known_sch()
91 struct subchannel *sch; call_fn_all_sch() local
94 sch = get_subchannel_by_schid(schid); call_fn_all_sch()
95 if (sch) { call_fn_all_sch()
97 rc = cb->fn_known_sch(sch, cb->data); call_fn_all_sch()
98 put_device(&sch->dev); call_fn_all_sch()
147 static int css_sch_create_locks(struct subchannel *sch) css_sch_create_locks() argument
149 sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL); css_sch_create_locks()
150 if (!sch->lock) css_sch_create_locks()
153 spin_lock_init(sch->lock); css_sch_create_locks()
154 mutex_init(&sch->reg_mutex); css_sch_create_locks()
161 struct subchannel *sch = to_subchannel(dev); css_subchannel_release() local
163 sch->config.intparm = 0; css_subchannel_release()
164 cio_commit_config(sch); css_subchannel_release()
165 kfree(sch->lock); css_subchannel_release()
166 kfree(sch); css_subchannel_release()
171 struct subchannel *sch; css_alloc_subchannel() local
174 sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA); css_alloc_subchannel()
175 if (!sch) css_alloc_subchannel()
178 ret = cio_validate_subchannel(sch, schid); css_alloc_subchannel()
182 ret = css_sch_create_locks(sch); css_alloc_subchannel()
186 INIT_WORK(&sch->todo_work, css_sch_todo); css_alloc_subchannel()
187 sch->dev.release = &css_subchannel_release; css_alloc_subchannel()
188 device_initialize(&sch->dev); css_alloc_subchannel()
189 return sch; css_alloc_subchannel()
192 kfree(sch); css_alloc_subchannel()
196 static int css_sch_device_register(struct subchannel *sch) css_sch_device_register() argument
200 mutex_lock(&sch->reg_mutex); css_sch_device_register()
201 dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid, css_sch_device_register()
202 sch->schid.sch_no); css_sch_device_register()
203 ret = device_add(&sch->dev); css_sch_device_register()
204 mutex_unlock(&sch->reg_mutex); css_sch_device_register()
210 * @sch: subchannel to be unregistered
212 void css_sch_device_unregister(struct subchannel *sch) css_sch_device_unregister() argument
214 mutex_lock(&sch->reg_mutex); css_sch_device_unregister()
215 if (device_is_registered(&sch->dev)) css_sch_device_unregister()
216 device_unregister(&sch->dev); css_sch_device_unregister()
217 mutex_unlock(&sch->reg_mutex); css_sch_device_unregister()
250 void css_update_ssd_info(struct subchannel *sch) css_update_ssd_info() argument
254 ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info); css_update_ssd_info()
256 ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw); css_update_ssd_info()
258 ssd_register_chpids(&sch->ssd_info); css_update_ssd_info()
264 struct subchannel *sch = to_subchannel(dev); type_show() local
266 return sprintf(buf, "%01x\n", sch->st); type_show()
274 struct subchannel *sch = to_subchannel(dev); modalias_show() local
276 return sprintf(buf, "css:t%01X\n", sch->st); modalias_show()
296 int css_register_subchannel(struct subchannel *sch) css_register_subchannel() argument
301 sch->dev.parent = &channel_subsystems[0]->device; css_register_subchannel()
302 sch->dev.bus = &css_bus_type; css_register_subchannel()
303 sch->dev.groups = default_subch_attr_groups; css_register_subchannel()
313 dev_set_uevent_suppress(&sch->dev, 1); css_register_subchannel()
314 css_update_ssd_info(sch); css_register_subchannel()
316 ret = css_sch_device_register(sch); css_register_subchannel()
318 CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n", css_register_subchannel()
319 sch->schid.ssid, sch->schid.sch_no, ret); css_register_subchannel()
322 if (!sch->driver) { css_register_subchannel()
328 dev_set_uevent_suppress(&sch->dev, 0); css_register_subchannel()
329 kobject_uevent(&sch->dev.kobj, KOBJ_ADD); css_register_subchannel()
336 struct subchannel *sch; css_probe_device() local
339 sch = css_alloc_subchannel(schid); css_probe_device()
340 if (IS_ERR(sch)) css_probe_device()
341 return PTR_ERR(sch); css_probe_device()
343 ret = css_register_subchannel(sch); css_probe_device()
345 put_device(&sch->dev); css_probe_device()
353 struct subchannel *sch; check_subchannel() local
356 sch = to_subchannel(dev); check_subchannel()
357 return schid_equal(&sch->schid, schid); check_subchannel()
401 CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid, css_evaluate_new_subchannel()
407 static int css_evaluate_known_subchannel(struct subchannel *sch, int slow) css_evaluate_known_subchannel() argument
411 if (sch->driver) { css_evaluate_known_subchannel()
412 if (sch->driver->sch_event) css_evaluate_known_subchannel()
413 ret = sch->driver->sch_event(sch, slow); css_evaluate_known_subchannel()
415 dev_dbg(&sch->dev, css_evaluate_known_subchannel()
420 CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n", css_evaluate_known_subchannel()
421 sch->schid.ssid, sch->schid.sch_no, ret); css_evaluate_known_subchannel()
428 struct subchannel *sch; css_evaluate_subchannel() local
431 sch = get_subchannel_by_schid(schid); css_evaluate_subchannel()
432 if (sch) { css_evaluate_subchannel()
433 ret = css_evaluate_known_subchannel(sch, slow); css_evaluate_subchannel()
434 put_device(&sch->dev); css_evaluate_subchannel()
443 * @sch: subchannel
450 void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo) css_sched_sch_todo() argument
452 CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n", css_sched_sch_todo()
453 sch->schid.ssid, sch->schid.sch_no, todo); css_sched_sch_todo()
454 if (sch->todo >= todo) css_sched_sch_todo()
457 if (!get_device(&sch->dev)) css_sched_sch_todo()
459 sch->todo = todo; css_sched_sch_todo()
460 if (!queue_work(cio_work_q, &sch->todo_work)) { css_sched_sch_todo()
462 put_device(&sch->dev); css_sched_sch_todo()
469 struct subchannel *sch; css_sch_todo() local
473 sch = container_of(work, struct subchannel, todo_work); css_sch_todo()
475 spin_lock_irq(sch->lock); css_sch_todo()
476 todo = sch->todo; css_sch_todo()
477 CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid, css_sch_todo()
478 sch->schid.sch_no, todo); css_sch_todo()
479 sch->todo = SCH_TODO_NOTHING; css_sch_todo()
480 spin_unlock_irq(sch->lock); css_sch_todo()
486 ret = css_evaluate_known_subchannel(sch, 1); css_sch_todo()
488 spin_lock_irq(sch->lock); css_sch_todo()
489 css_sched_sch_todo(sch, todo); css_sch_todo()
490 spin_unlock_irq(sch->lock); css_sch_todo()
494 css_sch_device_unregister(sch); css_sch_todo()
498 put_device(&sch->dev); css_sch_todo()
519 static int slow_eval_known_fn(struct subchannel *sch, void *data) slow_eval_known_fn() argument
525 eval = idset_sch_contains(slow_subchannel_set, sch->schid); slow_eval_known_fn()
526 idset_sch_del(slow_subchannel_set, sch->schid); slow_eval_known_fn()
529 rc = css_evaluate_known_subchannel(sch, 1); slow_eval_known_fn()
531 css_schedule_eval(sch->schid); slow_eval_known_fn()
613 struct subchannel *sch = to_subchannel(dev); __unset_registered() local
615 idset_sch_del(set, sch->schid); __unset_registered()
661 struct subchannel *sch; css_process_crw() local
682 sch = get_subchannel_by_schid(mchk_schid); css_process_crw()
683 if (sch) { css_process_crw()
684 css_update_ssd_info(sch); css_process_crw()
685 put_device(&sch->dev); css_process_crw()
1121 int sch_is_pseudo_sch(struct subchannel *sch) sch_is_pseudo_sch() argument
1123 return sch == to_css(sch->dev.parent)->pseudo_subchannel; sch_is_pseudo_sch()
1128 struct subchannel *sch = to_subchannel(dev); css_bus_match() local
1133 if (sch->st == id->type) css_bus_match()
1142 struct subchannel *sch; css_probe() local
1145 sch = to_subchannel(dev); css_probe()
1146 sch->driver = to_cssdriver(dev->driver); css_probe()
1147 ret = sch->driver->probe ? sch->driver->probe(sch) : 0; css_probe()
1149 sch->driver = NULL; css_probe()
1155 struct subchannel *sch; css_remove() local
1158 sch = to_subchannel(dev); css_remove()
1159 ret = sch->driver->remove ? sch->driver->remove(sch) : 0; css_remove()
1160 sch->driver = NULL; css_remove()
1166 struct subchannel *sch; css_shutdown() local
1168 sch = to_subchannel(dev); css_shutdown()
1169 if (sch->driver && sch->driver->shutdown) css_shutdown()
1170 sch->driver->shutdown(sch); css_shutdown()
1175 struct subchannel *sch = to_subchannel(dev); css_uevent() local
1178 ret = add_uevent_var(env, "ST=%01X", sch->st); css_uevent()
1181 ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st); css_uevent()
1187 struct subchannel *sch = to_subchannel(dev); css_pm_prepare() local
1190 if (mutex_is_locked(&sch->reg_mutex)) css_pm_prepare()
1192 if (!sch->dev.driver) css_pm_prepare()
1194 drv = to_cssdriver(sch->dev.driver); css_pm_prepare()
1196 return drv->prepare ? drv->prepare(sch) : 0; css_pm_prepare()
1201 struct subchannel *sch = to_subchannel(dev); css_pm_complete() local
1204 if (!sch->dev.driver) css_pm_complete()
1206 drv = to_cssdriver(sch->dev.driver); css_pm_complete()
1208 drv->complete(sch); css_pm_complete()
1213 struct subchannel *sch = to_subchannel(dev); css_pm_freeze() local
1216 if (!sch->dev.driver) css_pm_freeze()
1218 drv = to_cssdriver(sch->dev.driver); css_pm_freeze()
1219 return drv->freeze ? drv->freeze(sch) : 0; css_pm_freeze()
1224 struct subchannel *sch = to_subchannel(dev); css_pm_thaw() local
1227 if (!sch->dev.driver) css_pm_thaw()
1229 drv = to_cssdriver(sch->dev.driver); css_pm_thaw()
1230 return drv->thaw ? drv->thaw(sch) : 0; css_pm_thaw()
1235 struct subchannel *sch = to_subchannel(dev); css_pm_restore() local
1238 css_update_ssd_info(sch); css_pm_restore()
1239 if (!sch->dev.driver) css_pm_restore()
1241 drv = to_cssdriver(sch->dev.driver); css_pm_restore()
1242 return drv->restore ? drv->restore(sch) : 0; css_pm_restore()
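
css_sched_sch_todo() (lines 450-462) shows the reference-counted deferral idiom used throughout this layer: a todo request never downgrades a stronger pending one, and the subchannel's struct device stays pinned for as long as the work item sits on cio_work_q. The same logic, isolated as a sketch:

    /* Sketch: schedule deferred subchannel work without losing the device. */
    static void sched_todo_sketch(struct subchannel *sch, enum sch_todo todo)
    {
            if (sch->todo >= todo)          /* stronger action already pending */
                    return;
            if (!get_device(&sch->dev))     /* pin the device for the worker */
                    return;
            sch->todo = todo;
            if (!queue_work(cio_work_q, &sch->todo_work))
                    put_device(&sch->dev);  /* already queued: drop extra ref */
    }

The worker (css_sch_todo, lines 469-498) then snapshots and clears sch->todo under sch->lock before acting, and drops the reference when it finishes.
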
device.c
150 static int io_subchannel_prepare(struct subchannel *sch) io_subchannel_prepare() argument
157 cdev = sch_get_cdev(sch); io_subchannel_prepare()
220 struct subchannel *sch = to_subchannel(dev); chpids_show() local
221 struct chsc_ssd_info *ssd = &sch->ssd_info; chpids_show()
240 struct subchannel *sch = to_subchannel(dev); pimpampom_show() local
241 struct pmcw *pmcw = &sch->schib.pmcw; pimpampom_show()
323 struct subchannel *sch; ccw_device_set_offline() local
337 sch = to_subchannel(cdev->dev.parent); ccw_device_set_offline()
358 io_subchannel_quiesce(sch); ccw_device_set_offline()
592 struct subchannel *sch; available_show() local
602 sch = to_subchannel(dev->parent); available_show()
603 if (!sch->lpm) available_show()
617 struct subchannel *sch = to_subchannel(dev); initiate_logging() local
620 rc = chsc_siosl(sch->schid); initiate_logging()
624 sch->schid.ssid, sch->schid.sch_no, rc); initiate_logging()
628 sch->schid.ssid, sch->schid.sch_no); initiate_logging()
635 struct subchannel *sch = to_subchannel(dev); vpm_show() local
637 return sprintf(buf, "%02x\n", sch->vpm); vpm_show()
740 static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch) io_subchannel_allocate_dev() argument
757 static int io_subchannel_initialize_dev(struct subchannel *sch, io_subchannel_initialize_dev() argument
766 priv->dev_id.devno = sch->schib.pmcw.dev; io_subchannel_initialize_dev()
767 priv->dev_id.ssid = sch->schid.ssid; io_subchannel_initialize_dev()
768 priv->schid = sch->schid; io_subchannel_initialize_dev()
776 cdev->ccwlock = sch->lock; io_subchannel_initialize_dev()
777 cdev->dev.parent = &sch->dev; io_subchannel_initialize_dev()
786 if (!get_device(&sch->dev)) { io_subchannel_initialize_dev()
791 spin_lock_irq(sch->lock); io_subchannel_initialize_dev()
792 sch_set_cdev(sch, cdev); io_subchannel_initialize_dev()
793 spin_unlock_irq(sch->lock); io_subchannel_initialize_dev()
802 static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch) io_subchannel_create_ccwdev() argument
807 cdev = io_subchannel_allocate_dev(sch); io_subchannel_create_ccwdev()
809 ret = io_subchannel_initialize_dev(sch, cdev); io_subchannel_create_ccwdev()
818 static void sch_create_and_recog_new_device(struct subchannel *sch) sch_create_and_recog_new_device() argument
823 cdev = io_subchannel_create_ccwdev(sch); sch_create_and_recog_new_device()
826 css_sch_device_unregister(sch); sch_create_and_recog_new_device()
830 io_subchannel_recog(cdev, sch); sch_create_and_recog_new_device()
838 struct subchannel *sch; io_subchannel_register() local
842 sch = to_subchannel(cdev->dev.parent); io_subchannel_register()
849 if (!device_is_registered(&sch->dev)) io_subchannel_register()
851 css_update_ssd_info(sch); io_subchannel_register()
875 dev_set_uevent_suppress(&sch->dev, 0); io_subchannel_register()
876 kobject_uevent(&sch->dev.kobj, KOBJ_ADD); io_subchannel_register()
883 spin_lock_irqsave(sch->lock, flags); io_subchannel_register()
884 sch_set_cdev(sch, NULL); io_subchannel_register()
885 spin_unlock_irqrestore(sch->lock, flags); io_subchannel_register()
900 struct subchannel *sch; ccw_device_call_sch_unregister() local
905 sch = to_subchannel(cdev->dev.parent); ccw_device_call_sch_unregister()
906 css_sch_device_unregister(sch); ccw_device_call_sch_unregister()
908 put_device(&sch->dev); ccw_device_call_sch_unregister()
941 static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch) io_subchannel_recog() argument
947 spin_lock_irq(sch->lock); io_subchannel_recog()
949 spin_unlock_irq(sch->lock); io_subchannel_recog()
952 ccw_device_move_to_sch(struct ccw_device *cdev, struct subchannel *sch) ccw_device_move_to_sch() argument
953 struct subchannel *sch) ccw_device_move_to_sch()
960 if (!get_device(&sch->dev)) ccw_device_move_to_sch()
972 put_device(&sch->dev); ccw_device_move_to_sch()
977 mutex_lock(&sch->reg_mutex); ccw_device_move_to_sch()
978 rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV); ccw_device_move_to_sch()
979 mutex_unlock(&sch->reg_mutex); ccw_device_move_to_sch()
983 cdev->private->dev_id.devno, sch->schid.ssid, ccw_device_move_to_sch()
984 sch->schib.pmcw.dev, rc); ccw_device_move_to_sch()
992 put_device(&sch->dev); ccw_device_move_to_sch()
1005 spin_lock_irq(sch->lock); ccw_device_move_to_sch()
1006 cdev->private->schid = sch->schid; ccw_device_move_to_sch()
1007 cdev->ccwlock = sch->lock; ccw_device_move_to_sch()
1008 if (!sch_is_pseudo_sch(sch)) ccw_device_move_to_sch()
1009 sch_set_cdev(sch, cdev); ccw_device_move_to_sch()
1010 spin_unlock_irq(sch->lock); ccw_device_move_to_sch()
1011 if (!sch_is_pseudo_sch(sch)) ccw_device_move_to_sch()
1012 css_update_ssd_info(sch); ccw_device_move_to_sch()
1018 struct subchannel *sch = to_subchannel(cdev->dev.parent); ccw_device_move_to_orph() local
1019 struct channel_subsystem *css = to_css(sch->dev.parent); ccw_device_move_to_orph()
1024 static void io_subchannel_irq(struct subchannel *sch) io_subchannel_irq() argument
1028 cdev = sch_get_cdev(sch); io_subchannel_irq()
1031 CIO_TRACE_EVENT(6, dev_name(&sch->dev)); io_subchannel_irq()
1038 void io_subchannel_init_config(struct subchannel *sch) io_subchannel_init_config() argument
1040 memset(&sch->config, 0, sizeof(sch->config)); io_subchannel_init_config()
1041 sch->config.csense = 1; io_subchannel_init_config()
1044 static void io_subchannel_init_fields(struct subchannel *sch) io_subchannel_init_fields() argument
1046 if (cio_is_console(sch->schid)) io_subchannel_init_fields()
1047 sch->opm = 0xff; io_subchannel_init_fields()
1049 sch->opm = chp_get_sch_opm(sch); io_subchannel_init_fields()
1050 sch->lpm = sch->schib.pmcw.pam & sch->opm; io_subchannel_init_fields()
1051 sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC; io_subchannel_init_fields()
1055 sch->schib.pmcw.dev, sch->schid.ssid, io_subchannel_init_fields()
1056 sch->schid.sch_no, sch->schib.pmcw.pim, io_subchannel_init_fields()
1057 sch->schib.pmcw.pam, sch->schib.pmcw.pom); io_subchannel_init_fields()
1059 io_subchannel_init_config(sch); io_subchannel_init_fields()
1066 static int io_subchannel_probe(struct subchannel *sch) io_subchannel_probe() argument
1072 if (cio_is_console(sch->schid)) { io_subchannel_probe()
1073 rc = sysfs_create_group(&sch->dev.kobj, io_subchannel_probe()
1079 sch->schid.ssid, sch->schid.sch_no, rc); io_subchannel_probe()
1085 dev_set_uevent_suppress(&sch->dev, 0); io_subchannel_probe()
1086 kobject_uevent(&sch->dev.kobj, KOBJ_ADD); io_subchannel_probe()
1087 cdev = sch_get_cdev(sch); io_subchannel_probe()
1098 io_subchannel_init_fields(sch); io_subchannel_probe()
1099 rc = cio_commit_config(sch); io_subchannel_probe()
1102 rc = sysfs_create_group(&sch->dev.kobj, io_subchannel_probe()
1111 set_io_private(sch, io_priv); io_subchannel_probe()
1112 css_schedule_eval(sch->schid); io_subchannel_probe()
1116 spin_lock_irq(sch->lock); io_subchannel_probe()
1117 css_sched_sch_todo(sch, SCH_TODO_UNREG); io_subchannel_probe()
1118 spin_unlock_irq(sch->lock); io_subchannel_probe()
1123 io_subchannel_remove (struct subchannel *sch) io_subchannel_remove() argument
1125 struct io_subchannel_private *io_priv = to_io_private(sch); io_subchannel_remove()
1128 cdev = sch_get_cdev(sch); io_subchannel_remove()
1131 io_subchannel_quiesce(sch); io_subchannel_remove()
1134 sch_set_cdev(sch, NULL); io_subchannel_remove()
1135 set_io_private(sch, NULL); io_subchannel_remove()
1141 sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); io_subchannel_remove()
1145 static void io_subchannel_verify(struct subchannel *sch) io_subchannel_verify() argument
1149 cdev = sch_get_cdev(sch); io_subchannel_verify()
1154 static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask) io_subchannel_terminate_path() argument
1158 cdev = sch_get_cdev(sch); io_subchannel_terminate_path()
1161 if (cio_update_schib(sch)) io_subchannel_terminate_path()
1164 if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask) io_subchannel_terminate_path()
1170 if (cio_clear(sch)) io_subchannel_terminate_path()
1181 static int io_subchannel_chp_event(struct subchannel *sch, io_subchannel_chp_event() argument
1184 struct ccw_device *cdev = sch_get_cdev(sch); io_subchannel_chp_event()
1187 mask = chp_ssd_get_mask(&sch->ssd_info, link); io_subchannel_chp_event()
1192 sch->opm &= ~mask; io_subchannel_chp_event()
1193 sch->lpm &= ~mask; io_subchannel_chp_event()
1196 io_subchannel_terminate_path(sch, mask); io_subchannel_chp_event()
1199 sch->opm |= mask; io_subchannel_chp_event()
1200 sch->lpm |= mask; io_subchannel_chp_event()
1203 io_subchannel_verify(sch); io_subchannel_chp_event()
1206 if (cio_update_schib(sch)) io_subchannel_chp_event()
1210 io_subchannel_terminate_path(sch, mask); io_subchannel_chp_event()
1213 if (cio_update_schib(sch)) io_subchannel_chp_event()
1215 sch->lpm |= mask & sch->opm; io_subchannel_chp_event()
1218 io_subchannel_verify(sch); io_subchannel_chp_event()
1224 static void io_subchannel_quiesce(struct subchannel *sch) io_subchannel_quiesce() argument
1229 spin_lock_irq(sch->lock); io_subchannel_quiesce()
1230 cdev = sch_get_cdev(sch); io_subchannel_quiesce()
1231 if (cio_is_console(sch->schid)) io_subchannel_quiesce()
1233 if (!sch->schib.pmcw.ena) io_subchannel_quiesce()
1235 ret = cio_disable_subchannel(sch); io_subchannel_quiesce()
1246 spin_unlock_irq(sch->lock); io_subchannel_quiesce()
1249 spin_lock_irq(sch->lock); io_subchannel_quiesce()
1251 ret = cio_disable_subchannel(sch); io_subchannel_quiesce()
1254 spin_unlock_irq(sch->lock); io_subchannel_quiesce()
1257 static void io_subchannel_shutdown(struct subchannel *sch) io_subchannel_shutdown() argument
1259 io_subchannel_quiesce(sch); io_subchannel_shutdown()
1382 struct subchannel *sch = to_subchannel(cdev->dev.parent); ccw_device_set_notoper() local
1385 CIO_TRACE_EVENT(2, dev_name(&sch->dev)); ccw_device_set_notoper()
1387 cio_disable_subchannel(sch); ccw_device_set_notoper()
1403 static enum io_sch_action sch_get_action(struct subchannel *sch) sch_get_action() argument
1407 cdev = sch_get_cdev(sch); sch_get_action()
1408 if (cio_update_schib(sch)) { sch_get_action()
1419 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { sch_get_action()
1424 if ((sch->schib.pmcw.pam & sch->opm) == 0) { sch_get_action()
1440 * @sch: subchannel
1448 static int io_subchannel_sch_event(struct subchannel *sch, int process) io_subchannel_sch_event() argument
1456 spin_lock_irqsave(sch->lock, flags); io_subchannel_sch_event()
1457 if (!device_is_registered(&sch->dev)) io_subchannel_sch_event()
1459 if (work_pending(&sch->todo_work)) io_subchannel_sch_event()
1461 cdev = sch_get_cdev(sch); io_subchannel_sch_event()
1464 action = sch_get_action(sch); io_subchannel_sch_event()
1465 CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n", io_subchannel_sch_event()
1466 sch->schid.ssid, sch->schid.sch_no, process, io_subchannel_sch_event()
1477 io_subchannel_verify(sch); io_subchannel_sch_event()
1508 spin_unlock_irqrestore(sch->lock, flags); io_subchannel_sch_event()
1522 spin_lock_irqsave(sch->lock, flags); io_subchannel_sch_event()
1528 sch_set_cdev(sch, NULL); io_subchannel_sch_event()
1529 spin_unlock_irqrestore(sch->lock, flags); io_subchannel_sch_event()
1541 css_sch_device_unregister(sch); io_subchannel_sch_event()
1546 dev_id.ssid = sch->schid.ssid; io_subchannel_sch_event()
1547 dev_id.devno = sch->schib.pmcw.dev; io_subchannel_sch_event()
1550 sch_create_and_recog_new_device(sch); io_subchannel_sch_event()
1553 rc = ccw_device_move_to_sch(cdev, sch); io_subchannel_sch_event()
1559 spin_lock_irqsave(sch->lock, flags); io_subchannel_sch_event()
1561 spin_unlock_irqrestore(sch->lock, flags); io_subchannel_sch_event()
1571 spin_unlock_irqrestore(sch->lock, flags); io_subchannel_sch_event()
1591 struct subchannel *sch = to_subchannel(cdev->dev.parent); ccw_device_enable_console() local
1597 io_subchannel_init_fields(sch); ccw_device_enable_console()
1598 rc = cio_commit_config(sch); ccw_device_enable_console()
1601 sch->driver = &io_subchannel_driver; ccw_device_enable_console()
1602 io_subchannel_recog(cdev, sch); ccw_device_enable_console()
1632 struct subchannel *sch; ccw_device_create_console() local
1634 sch = cio_probe_console(); ccw_device_create_console()
1635 if (IS_ERR(sch)) ccw_device_create_console()
1636 return ERR_CAST(sch); ccw_device_create_console()
1640 put_device(&sch->dev); ccw_device_create_console()
1643 set_io_private(sch, io_priv); ccw_device_create_console()
1644 cdev = io_subchannel_create_ccwdev(sch); ccw_device_create_console()
1646 put_device(&sch->dev); ccw_device_create_console()
1657 struct subchannel *sch = to_subchannel(cdev->dev.parent); ccw_device_destroy_console() local
1658 struct io_subchannel_private *io_priv = to_io_private(sch); ccw_device_destroy_console()
1660 set_io_private(sch, NULL); ccw_device_destroy_console()
1661 put_device(&sch->dev); ccw_device_destroy_console()
1676 struct subchannel *sch = to_subchannel(cdev->dev.parent); ccw_device_wait_idle() local
1679 cio_tsch(sch); ccw_device_wait_idle()
1680 if (sch->schib.scsw.cmd.actl == 0) ccw_device_wait_idle()
1832 struct subchannel *sch = to_subchannel(cdev->dev.parent); ccw_device_pm_freeze() local
1846 spin_lock_irq(sch->lock); ccw_device_pm_freeze()
1848 spin_unlock_irq(sch->lock); ccw_device_pm_freeze()
1856 spin_lock_irq(sch->lock); ccw_device_pm_freeze()
1857 ret = cio_disable_subchannel(sch); ccw_device_pm_freeze()
1858 spin_unlock_irq(sch->lock); ccw_device_pm_freeze()
1866 struct subchannel *sch = to_subchannel(cdev->dev.parent); ccw_device_pm_thaw() local
1872 spin_lock_irq(sch->lock); ccw_device_pm_thaw()
1874 ret = cio_enable_subchannel(sch, (u32)(addr_t)sch); ccw_device_pm_thaw()
1876 spin_unlock_irq(sch->lock); ccw_device_pm_thaw()
1894 struct subchannel *sch = to_subchannel(cdev->dev.parent); __ccw_device_pm_restore() local
1896 spin_lock_irq(sch->lock); __ccw_device_pm_restore()
1897 if (cio_is_console(sch->schid)) { __ccw_device_pm_restore()
1898 cio_enable_subchannel(sch, (u32)(addr_t)sch); __ccw_device_pm_restore()
1907 css_sched_sch_todo(sch, SCH_TODO_EVAL); __ccw_device_pm_restore()
1908 spin_unlock_irq(sch->lock); __ccw_device_pm_restore()
1912 sch = to_subchannel(cdev->dev.parent); __ccw_device_pm_restore()
1913 spin_lock_irq(sch->lock); __ccw_device_pm_restore()
1919 spin_unlock_irq(sch->lock); __ccw_device_pm_restore()
1922 spin_lock_irq(sch->lock); __ccw_device_pm_restore()
1926 spin_unlock_irq(sch->lock); __ccw_device_pm_restore()
1950 struct subchannel *sch; ccw_device_pm_restore() local
1954 sch = to_subchannel(cdev->dev.parent); ccw_device_pm_restore()
1955 spin_lock_irq(sch->lock); ccw_device_pm_restore()
1956 if (cio_is_console(sch->schid)) ccw_device_pm_restore()
1992 spin_unlock_irq(sch->lock); ccw_device_pm_restore()
1994 spin_lock_irq(sch->lock); ccw_device_pm_restore()
2004 spin_unlock_irq(sch->lock); ccw_device_pm_restore()
2006 spin_lock_irq(sch->lock); ccw_device_pm_restore()
2016 spin_unlock_irq(sch->lock); ccw_device_pm_restore()
2022 spin_unlock_irq(sch->lock); ccw_device_pm_restore()
2076 struct subchannel *sch; ccw_device_todo() local
2081 sch = to_subchannel(cdev->dev.parent); ccw_device_todo()
2101 if (!sch_is_pseudo_sch(sch)) ccw_device_todo()
2102 css_schedule_eval(sch->schid); ccw_device_todo()
2105 if (sch_is_pseudo_sch(sch)) ccw_device_todo()
2152 struct subchannel *sch = to_subchannel(cdev->dev.parent); ccw_device_siosl() local
2154 return chsc_siosl(sch->schid); ccw_device_siosl()
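
A pattern worth pulling out of the device.c matches is the deferred-uevent handshake (lines 875-876 and 1085-1086): registration runs with uevents suppressed, and a single KOBJ_ADD is emitted only once the ccw_device behind the subchannel is actually usable. As a sketch:

    /* Sketch: announce a subchannel whose uevents were held back. */
    static void announce_subchannel_sketch(struct subchannel *sch)
    {
            if (dev_get_uevent_suppress(&sch->dev)) {
                    dev_set_uevent_suppress(&sch->dev, 0); /* lift the hold */
                    kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
            }
    }

This keeps udev from seeing a half-initialized device; the same three-line sequence recurs in eadm_sch.c and chsc_sch.c above.
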
device_fsm.c
39 struct subchannel *sch; ccw_timeout_log() local
44 sch = to_subchannel(cdev->dev.parent); ccw_timeout_log()
45 private = to_io_private(sch); ccw_timeout_log()
47 cc = stsch_err(sch->schid, &schib); ccw_timeout_log()
57 dev_name(&sch->dev)); ccw_timeout_log()
59 "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm); ccw_timeout_log()
138 struct subchannel *sch; ccw_device_cancel_halt_clear() local
141 sch = to_subchannel(cdev->dev.parent); ccw_device_cancel_halt_clear()
142 if (cio_update_schib(sch)) ccw_device_cancel_halt_clear()
144 if (!sch->schib.pmcw.ena) ccw_device_cancel_halt_clear()
148 if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_HALT_PEND) && ccw_device_cancel_halt_clear()
149 !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) { ccw_device_cancel_halt_clear()
150 if (!scsw_is_tm(&sch->schib.scsw)) { ccw_device_cancel_halt_clear()
151 ret = cio_cancel(sch); ccw_device_cancel_halt_clear()
159 if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) { ccw_device_cancel_halt_clear()
163 ret = cio_halt(sch); ccw_device_cancel_halt_clear()
173 ret = cio_clear (sch); ccw_device_cancel_halt_clear()
205 __recover_lost_chpids(struct subchannel *sch, int old_lpm) __recover_lost_chpids() argument
213 if (!(sch->lpm & mask)) __recover_lost_chpids()
217 chpid.id = sch->schib.pmcw.chpid[i]; __recover_lost_chpids()
229 struct subchannel *sch; ccw_device_recog_done() local
232 sch = to_subchannel(cdev->dev.parent); ccw_device_recog_done()
234 if (cio_disable_subchannel(sch)) ccw_device_recog_done()
240 old_lpm = sch->lpm; ccw_device_recog_done()
243 if (cio_update_schib(sch)) ccw_device_recog_done()
246 sch->lpm = sch->schib.pmcw.pam & sch->opm; ccw_device_recog_done()
251 if (sch->lpm != old_lpm) ccw_device_recog_done()
252 __recover_lost_chpids(sch, old_lpm); ccw_device_recog_done()
354 struct subchannel *sch = to_subchannel(cdev->dev.parent); ccw_device_oper_notify() local
360 cdev->private->path_new_mask = sch->vpm; ccw_device_oper_notify()
374 struct subchannel *sch; ccw_device_done() local
376 sch = to_subchannel(cdev->dev.parent); ccw_device_done()
381 cio_disable_subchannel(sch); ccw_device_done()
391 cdev->private->dev_id.devno, sch->schid.sch_no); ccw_device_done()
399 cdev->private->dev_id.devno, sch->schid.sch_no); ccw_device_done()
409 sch->schid.sch_no); ccw_device_done()
433 struct subchannel *sch = to_subchannel(cdev->dev.parent); ccw_device_recognition() local
445 if (cio_enable_subchannel(sch, (u32) (addr_t) sch)) { ccw_device_recognition()
474 struct subchannel *sch = to_subchannel(cdev->dev.parent); ccw_device_report_path_events() local
480 if (mask & cdev->private->path_gone_mask & ~(sch->vpm)) ccw_device_report_path_events()
482 if (mask & cdev->private->path_new_mask & sch->vpm) ccw_device_report_path_events()
484 if (mask & cdev->private->pgid_reset_mask & sch->vpm) ccw_device_report_path_events()
519 struct subchannel *sch; ccw_device_verify_done() local
521 sch = to_subchannel(cdev->dev.parent); ccw_device_verify_done()
523 if (cio_update_schib(sch)) { ccw_device_verify_done()
528 sch->lpm = sch->vpm; ccw_device_verify_done()
576 struct subchannel *sch; ccw_device_online() local
582 sch = to_subchannel(cdev->dev.parent); ccw_device_online()
583 ret = cio_enable_subchannel(sch, (u32)(addr_t)sch); ccw_device_online()
619 struct subchannel *sch; ccw_device_offline() local
636 sch = to_subchannel(cdev->dev.parent); ccw_device_offline()
637 if (cio_update_schib(sch)) ccw_device_offline()
639 if (scsw_actl(&sch->schib.scsw) != 0) ccw_device_offline()
673 struct subchannel *sch = to_subchannel(cdev->dev.parent); ccw_device_offline_verify() local
675 css_schedule_eval(sch->schid); ccw_device_offline_verify()
684 struct subchannel *sch; ccw_device_online_verify() local
690 sch = to_subchannel(cdev->dev.parent); ccw_device_online_verify()
695 if (cio_update_schib(sch)) { ccw_device_online_verify()
700 if (scsw_actl(&sch->schib.scsw) != 0 || ccw_device_online_verify()
701 (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) || ccw_device_online_verify()
722 struct subchannel *sch = to_subchannel(cdev->dev.parent); ccw_device_boxed_verify() local
725 if (cio_enable_subchannel(sch, (u32) (addr_t) sch)) ccw_device_boxed_verify()
730 css_schedule_eval(sch->schid); ccw_device_boxed_verify()
949 struct subchannel *sch; ccw_device_start_id() local
951 sch = to_subchannel(cdev->dev.parent); ccw_device_start_id()
952 if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0) ccw_device_start_id()
961 struct subchannel *sch; ccw_device_trigger_reprobe() local
966 sch = to_subchannel(cdev->dev.parent); ccw_device_trigger_reprobe()
968 if (cio_update_schib(sch)) ccw_device_trigger_reprobe()
974 sch->lpm = sch->schib.pmcw.pam & sch->opm; ccw_device_trigger_reprobe()
979 io_subchannel_init_config(sch); ccw_device_trigger_reprobe()
980 if (cio_commit_config(sch)) ccw_device_trigger_reprobe()
984 /* Check if this is another device which appeared on the same sch. */ ccw_device_trigger_reprobe()
985 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) ccw_device_trigger_reprobe()
986 css_schedule_eval(sch->schid); ccw_device_trigger_reprobe()
994 struct subchannel *sch; ccw_device_disabled_irq() local
996 sch = to_subchannel(cdev->dev.parent); ccw_device_disabled_irq()
1001 cio_disable_subchannel(sch); ccw_device_disabled_irq()
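
ccw_device_cancel_halt_clear() (lines 138-173) escalates through the three I/O-termination instructions, picking the mildest one whose predecessor has not yet taken effect. A simplified sketch of the decision ladder (the real code also skips xsch for transport-mode I/O and bounds the number of retries):

    /* Sketch: terminate in-flight I/O, escalating only as far as needed. */
    static int cancel_halt_clear_sketch(struct subchannel *sch)
    {
            union scsw *scsw = &sch->schib.scsw;

            if (!(scsw_actl(scsw) & (SCSW_ACTL_HALT_PEND | SCSW_ACTL_CLEAR_PEND)))
                    return cio_cancel(sch);     /* xsch: cheapest option */
            if (!(scsw_actl(scsw) & SCSW_ACTL_CLEAR_PEND))
                    return cio_halt(sch);       /* hsch: halt the subchannel */
            return cio_clear(sch);              /* csch: last resort */
    }
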
chsc_sch.c
57 static void chsc_subchannel_irq(struct subchannel *sch) chsc_subchannel_irq() argument
59 struct chsc_private *private = dev_get_drvdata(&sch->dev); chsc_subchannel_irq()
69 CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n", chsc_subchannel_irq()
70 sch->schid.ssid, sch->schid.sch_no); chsc_subchannel_irq()
75 cio_update_schib(sch); chsc_subchannel_irq()
77 put_device(&sch->dev); chsc_subchannel_irq()
80 static int chsc_subchannel_probe(struct subchannel *sch) chsc_subchannel_probe() argument
86 sch->schid.ssid, sch->schid.sch_no); chsc_subchannel_probe()
87 sch->isc = CHSC_SCH_ISC; chsc_subchannel_probe()
91 dev_set_drvdata(&sch->dev, private); chsc_subchannel_probe()
92 ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch); chsc_subchannel_probe()
95 sch->schid.ssid, sch->schid.sch_no, ret); chsc_subchannel_probe()
96 dev_set_drvdata(&sch->dev, NULL); chsc_subchannel_probe()
99 if (dev_get_uevent_suppress(&sch->dev)) { chsc_subchannel_probe()
100 dev_set_uevent_suppress(&sch->dev, 0); chsc_subchannel_probe()
101 kobject_uevent(&sch->dev.kobj, KOBJ_ADD); chsc_subchannel_probe()
107 static int chsc_subchannel_remove(struct subchannel *sch) chsc_subchannel_remove() argument
111 cio_disable_subchannel(sch); chsc_subchannel_remove()
112 private = dev_get_drvdata(&sch->dev); chsc_subchannel_remove()
113 dev_set_drvdata(&sch->dev, NULL); chsc_subchannel_remove()
116 put_device(&sch->dev); chsc_subchannel_remove()
122 static void chsc_subchannel_shutdown(struct subchannel *sch) chsc_subchannel_shutdown() argument
124 cio_disable_subchannel(sch); chsc_subchannel_shutdown()
127 static int chsc_subchannel_prepare(struct subchannel *sch) chsc_subchannel_prepare() argument
136 cc = stsch_err(sch->schid, &schib); chsc_subchannel_prepare()
142 static int chsc_subchannel_freeze(struct subchannel *sch) chsc_subchannel_freeze() argument
144 return cio_disable_subchannel(sch); chsc_subchannel_freeze()
147 static int chsc_subchannel_restore(struct subchannel *sch) chsc_subchannel_restore() argument
149 return cio_enable_subchannel(sch, (u32)(unsigned long)sch); chsc_subchannel_restore()
213 struct subchannel *sch = to_subchannel(dev); chsc_subchannel_match_next_free() local
215 return sch->schib.pmcw.ena && !scsw_fctl(&sch->schib.scsw); chsc_subchannel_match_next_free()
218 static struct subchannel *chsc_get_next_subchannel(struct subchannel *sch) chsc_get_next_subchannel() argument
223 sch ? &sch->dev : NULL, NULL, chsc_get_next_subchannel()
247 struct subchannel *sch = NULL; chsc_async() local
252 while ((sch = chsc_get_next_subchannel(sch))) { chsc_async()
253 spin_lock(sch->lock); chsc_async()
254 private = dev_get_drvdata(&sch->dev); chsc_async()
256 spin_unlock(sch->lock); chsc_async()
260 chsc_area->header.sid = sch->schid; chsc_async()
262 CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid)); chsc_async()
271 sch->schib.scsw.cmd.fctl |= SCSW_FCTL_START_FUNC; chsc_async()
281 spin_unlock(sch->lock); chsc_async()
283 sch->schid.ssid, sch->schid.sch_no, cc); chsc_async()
286 put_device(&sch->dev); chsc_async()
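
chsc_async() (lines 247-286) hunts for a free CHSC subchannel by walking the driver's devices and testing each one with chsc_subchannel_match_next_free() (line 215). A sketch of that scan; the driver_find_device() call reflects the 4.4-era driver-core API, and the wrapper name is illustrative:

    /* Sketch: accept only enabled subchannels with no function in progress. */
    static int chsc_match_free(struct device *dev, void *data)
    {
            struct subchannel *sch = to_subchannel(dev);

            return sch->schib.pmcw.ena && !scsw_fctl(&sch->schib.scsw);
    }

    /* Resume the walk after the last subchannel tried (NULL = from start).
     * driver_find_device() takes a reference; callers put_device() it. */
    static struct subchannel *next_free_sch(struct device_driver *drv,
                                            struct subchannel *start)
    {
            struct device *dev = driver_find_device(drv,
                                                    start ? &start->dev : NULL,
                                                    NULL, chsc_match_free);

            return dev ? to_subchannel(dev) : NULL;
    }
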
device_pgid.c
34 struct subchannel *sch = to_subchannel(cdev->dev.parent); verify_done() local
42 if (sch->config.mp != mpath) { verify_done()
43 sch->config.mp = mpath; verify_done()
44 rc = cio_commit_config(sch); verify_done()
49 sch->vpm); verify_done()
73 struct subchannel *sch = to_subchannel(cdev->dev.parent); nop_do() local
76 req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm & nop_do()
85 verify_done(cdev, sch->vpm ? 0 : -EACCES); nop_do()
105 struct subchannel *sch = to_subchannel(cdev->dev.parent); nop_callback() local
110 sch->vpm |= req->lpm; nop_callback()
168 struct subchannel *sch = to_subchannel(cdev->dev.parent); pgid_wipeout_start() local
181 req->lpm = sch->schib.pmcw.pam; pgid_wipeout_start()
195 struct subchannel *sch = to_subchannel(cdev->dev.parent); spid_do() local
204 if (req->lpm & sch->opm) spid_do()
220 verify_done(cdev, sch->vpm ? 0 : -EACCES); spid_do()
228 struct subchannel *sch = to_subchannel(cdev->dev.parent); spid_callback() local
233 sch->vpm |= req->lpm & sch->opm; spid_callback()
333 struct subchannel *sch = to_subchannel(cdev->dev.parent); pgid_to_donepm() local
345 if (sch->opm & lpm) { pgid_to_donepm()
379 struct subchannel *sch = to_subchannel(cdev->dev.parent); snid_done() local
395 sch->vpm = donepm & sch->opm; snid_done()
404 id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm, snid_done()
414 verify_done(cdev, sch->vpm == 0 ? -EACCES : 0); snid_done()
453 struct subchannel *sch = to_subchannel(cdev->dev.parent); snid_do() local
457 req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & snid_do()
510 struct subchannel *sch = to_subchannel(cdev->dev.parent); verify_start() local
514 sch->vpm = 0; verify_start()
515 sch->lpm = sch->schib.pmcw.pam; verify_start()
520 cdev->private->pgid_todo_mask = sch->schib.pmcw.pam; verify_start()
548 * paths are operational. The resulting path mask is stored in sch->vpm.
573 struct subchannel *sch = to_subchannel(cdev->dev.parent); disband_callback() local
580 if (sch->config.mp) { disband_callback()
581 sch->config.mp = 0; disband_callback()
582 rc = cio_commit_config(sch); disband_callback()
600 struct subchannel *sch = to_subchannel(cdev->dev.parent); ccw_device_disband_start() local
610 req->lpm = sch->schib.pmcw.pam & sch->opm; ccw_device_disband_start()
661 struct subchannel *sch = to_subchannel(cdev->dev.parent); ccw_device_stlck_start() local
670 req->lpm = sch->schib.pmcw.pam & sch->opm; ccw_device_stlck_start()
682 struct subchannel *sch = to_subchannel(cdev->dev.parent); ccw_device_stlck() local
697 spin_lock_irq(sch->lock); ccw_device_stlck()
698 rc = cio_enable_subchannel(sch, (u32) (addr_t) sch); ccw_device_stlck()
704 spin_unlock_irq(sch->lock); ccw_device_stlck()
708 spin_lock_irq(sch->lock); ccw_device_stlck()
710 spin_unlock_irq(sch->lock); ccw_device_stlck()
715 spin_lock_irq(sch->lock); ccw_device_stlck()
716 cio_disable_subchannel(sch); ccw_device_stlck()
719 spin_unlock_irq(sch->lock); ccw_device_stlck()
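
Everything in device_pgid.c works on 8-bit path masks: one bit per channel path, 0x80 for path 0 down to 0x01 for path 7, with pam, opm and vpm ANDed together to select usable paths. The lpm_adjust() helper called in nop_do() and snid_do() boils down to sliding the candidate bit right until it lands on a permitted path; as a sketch:

    /* Sketch: find the next usable path at or below the current one. */
    static u8 lpm_adjust_sketch(u8 lpm, u8 mask)
    {
            while (lpm && !(lpm & mask))
                    lpm >>= 1;      /* try the next-lower path bit */
            return lpm;             /* 0 means: no usable path left */
    }
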
eadm_sch.h
14 struct subchannel *sch; member in struct:eadm_private
ccwreq.c
77 struct subchannel *sch = to_subchannel(cdev->dev.parent); ccwreq_do() local
89 rc = cio_start(sch, cp, (u8) req->mask); ccwreq_do()
105 rc = cio_clear(sch); ccwreq_do()
152 struct subchannel *sch = to_subchannel(cdev->dev.parent); ccw_request_cancel() local
159 rc = cio_clear(sch); ccw_request_cancel()
327 struct subchannel *sch = to_subchannel(cdev->dev.parent); ccw_request_timeout() local
331 if (cio_update_schib(sch)) ccw_request_timeout()
335 if ((0x80 >> chp) & sch->schib.pmcw.lpum) ccw_request_timeout()
339 scsw_cstat(&sch->schib.scsw), ccw_request_timeout()
340 scsw_dstat(&sch->schib.scsw), ccw_request_timeout()
341 sch->schid.cssid, ccw_request_timeout()
342 sch->schib.pmcw.chpid[chp]); ccw_request_timeout()
349 rc = cio_clear(sch); ccw_request_timeout()
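
ccw_request_timeout() (lines 327-342) turns the PMCW's last-path-used mask into a CHPID for its error message: lpum has exactly one bit set, and that bit's index selects the entry in the chpid array. The lookup as a sketch:

    /* Sketch: map the last-path-used mask to the CHPID that timed out. */
    static u8 timed_out_chpid(struct subchannel *sch)
    {
            int chp;

            for (chp = 0; chp < 8; chp++)
                    if ((0x80 >> chp) & sch->schib.pmcw.lpum)
                            return sch->schib.pmcw.chpid[chp];
            return 0;       /* no path recorded */
    }
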
device_ops.c
140 struct subchannel *sch; ccw_device_clear() local
145 sch = to_subchannel(cdev->dev.parent); ccw_device_clear()
146 if (!sch->schib.pmcw.ena) ccw_device_clear()
154 ret = cio_clear(sch); ccw_device_clear()
188 struct subchannel *sch; ccw_device_start_key() local
193 sch = to_subchannel(cdev->dev.parent); ccw_device_start_key()
194 if (!sch->schib.pmcw.ena) ccw_device_start_key()
209 ((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) && ccw_device_start_key()
210 !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) || ccw_device_start_key()
213 ret = cio_set_options (sch, flags); ccw_device_start_key()
218 lpm &= sch->lpm; ccw_device_start_key()
222 ret = cio_start_key (sch, cpa, lpm, key); ccw_device_start_key()
365 struct subchannel *sch; ccw_device_halt() local
370 sch = to_subchannel(cdev->dev.parent); ccw_device_halt()
371 if (!sch->schib.pmcw.ena) ccw_device_halt()
379 ret = cio_halt(sch); ccw_device_halt()
400 struct subchannel *sch; ccw_device_resume() local
404 sch = to_subchannel(cdev->dev.parent); ccw_device_resume()
405 if (!sch->schib.pmcw.ena) ccw_device_resume()
410 !(sch->schib.scsw.cmd.actl & SCSW_ACTL_SUSPENDED)) ccw_device_resume()
412 return cio_resume(sch); ccw_device_resume()
450 struct subchannel *sch; ccw_device_get_path_mask() local
455 sch = to_subchannel(cdev->dev.parent); ccw_device_get_path_mask()
456 return sch->lpm; ccw_device_get_path_mask()
470 struct subchannel *sch; ccw_device_get_chp_desc() local
473 sch = to_subchannel(cdev->dev.parent); ccw_device_get_chp_desc()
475 chpid.id = sch->schib.pmcw.chpid[chp_idx]; ccw_device_get_chp_desc()
504 struct subchannel *sch; ccw_device_tm_start_key() local
507 sch = to_subchannel(cdev->dev.parent); ccw_device_tm_start_key()
508 if (!sch->schib.pmcw.ena) ccw_device_tm_start_key()
524 lpm &= sch->lpm; ccw_device_tm_start_key()
528 rc = cio_tm_start_key(sch, tcw, lpm, key); ccw_device_tm_start_key()
608 struct subchannel *sch = to_subchannel(cdev->dev.parent); ccw_device_get_mdc() local
615 mask &= sch->lpm; ccw_device_get_mdc()
617 mask = sch->lpm; ccw_device_get_mdc()
623 chpid.id = sch->schib.pmcw.chpid[i]; ccw_device_get_mdc()
653 struct subchannel *sch = to_subchannel(cdev->dev.parent); ccw_device_tm_intrg() local
655 if (!sch->schib.pmcw.ena) ccw_device_tm_intrg()
659 if (!scsw_is_tm(&sch->schib.scsw) || ccw_device_tm_intrg()
660 !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_START_PEND)) ccw_device_tm_intrg()
662 return cio_tm_intrg(sch); ccw_device_tm_intrg()
673 struct subchannel *sch = to_subchannel(cdev->dev.parent); ccw_device_get_schid() local
675 *schid = sch->schid; ccw_device_get_schid()
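
Almost every entry point above starts the same way: resolve the parent subchannel from the ccw_device, then refuse to touch hardware whose subchannel is not enabled (pmcw.ena), as in ccw_device_clear() and ccw_device_halt(). That shared guard, extracted as a sketch with the error codes used here (the real functions also check the device's FSM state, which is elided):

    /* Sketch: common precondition check before issuing channel I/O. */
    static int io_precheck(struct ccw_device *cdev, struct subchannel **schp)
    {
            struct subchannel *sch;

            if (!cdev || !cdev->dev.parent)
                    return -ENODEV;
            sch = to_subchannel(cdev->dev.parent);
            if (!sch->schib.pmcw.ena)       /* subchannel disabled */
                    return -EINVAL;
            *schp = sch;
            return 0;
    }
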
cio.h
120 extern int cio_update_schib(struct subchannel *sch);
121 extern int cio_commit_config(struct subchannel *sch);
123 int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key);
124 int cio_tm_intrg(struct subchannel *sch);
131 extern void cio_tsch(struct subchannel *sch);
chsc.c
89 u16 sch; /* subchannel */ member in struct:chsc_ssd_area
209 static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data) s390_subchannel_remove_chpid() argument
211 spin_lock_irq(sch->lock); s390_subchannel_remove_chpid()
212 if (sch->driver && sch->driver->chp_event) s390_subchannel_remove_chpid()
213 if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0) s390_subchannel_remove_chpid()
215 spin_unlock_irq(sch->lock); s390_subchannel_remove_chpid()
219 sch->lpm = 0; s390_subchannel_remove_chpid()
220 spin_unlock_irq(sch->lock); s390_subchannel_remove_chpid()
221 css_schedule_eval(sch->schid); s390_subchannel_remove_chpid()
242 static int __s390_process_res_acc(struct subchannel *sch, void *data) __s390_process_res_acc() argument
244 spin_lock_irq(sch->lock); __s390_process_res_acc()
245 if (sch->driver && sch->driver->chp_event) __s390_process_res_acc()
246 sch->driver->chp_event(sch, data, CHP_ONLINE); __s390_process_res_acc()
247 spin_unlock_irq(sch->lock); __s390_process_res_acc()
710 static void __s390_subchannel_vary_chpid(struct subchannel *sch, __s390_subchannel_vary_chpid() argument
718 spin_lock_irqsave(sch->lock, flags); __s390_subchannel_vary_chpid()
719 if (sch->driver && sch->driver->chp_event) __s390_subchannel_vary_chpid()
720 sch->driver->chp_event(sch, &link, __s390_subchannel_vary_chpid()
722 spin_unlock_irqrestore(sch->lock, flags); __s390_subchannel_vary_chpid()
725 static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data) s390_subchannel_vary_chpid_off() argument
729 __s390_subchannel_vary_chpid(sch, *chpid, 0); s390_subchannel_vary_chpid_off()
733 static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data) s390_subchannel_vary_chpid_on() argument
737 __s390_subchannel_vary_chpid(sch, *chpid, 1); s390_subchannel_vary_chpid_on()
1319 brinfo_area->sch = schid.sch_no; chsc_pnso_brinfo()
device_status.c
53 struct subchannel *sch; ccw_device_path_notoper() local
55 sch = to_subchannel(cdev->dev.parent); ccw_device_path_notoper()
56 if (cio_update_schib(sch)) ccw_device_path_notoper()
61 sch->schid.ssid, sch->schid.sch_no, ccw_device_path_notoper()
62 sch->schib.pmcw.pnom); ccw_device_path_notoper()
64 sch->lpm &= ~sch->schib.pmcw.pnom; ccw_device_path_notoper()
311 struct subchannel *sch; ccw_device_do_sense() local
315 sch = to_subchannel(cdev->dev.parent); ccw_device_do_sense()
330 sense_ccw = &to_io_private(sch)->sense_ccw; ccw_device_do_sense()
336 rc = cio_start(sch, sense_ccw, 0xff); ccw_device_do_sense()
|
H A D | io_sch.h | 26 static inline struct ccw_device *sch_get_cdev(struct subchannel *sch) sch_get_cdev() argument 28 struct io_subchannel_private *priv = to_io_private(sch); sch_get_cdev() 32 static inline void sch_set_cdev(struct subchannel *sch, sch_set_cdev() argument 35 struct io_subchannel_private *priv = to_io_private(sch); sch_set_cdev() 119 struct subchannel *sch; member in struct:ccw_device_private
|
H A D | device_id.c | 200 struct subchannel *sch = to_subchannel(cdev->dev.parent); ccw_device_sense_id_start() local 218 req->lpm = sch->schib.pmcw.pam & sch->opm; ccw_device_sense_id_start()
|
H A D | chp.h | 61 u8 chp_get_sch_opm(struct subchannel *sch);
|
H A D | cmf.c | 187 struct subchannel *sch = to_subchannel(cdev->dev.parent); set_schib() local 190 sch->config.mme = mme; set_schib() 191 sch->config.mbfc = mbfc; set_schib() 194 sch->config.mba = address; set_schib() 196 sch->config.mbi = address; set_schib() 198 ret = cio_commit_config(sch); set_schib() 303 struct subchannel *sch; cmf_copy_block() local 308 sch = to_subchannel(cdev->dev.parent); cmf_copy_block() 310 if (cio_update_schib(sch)) cmf_copy_block() 313 if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) { cmf_copy_block() 315 if ((!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_SUSPENDED)) && cmf_copy_block() 316 (scsw_actl(&sch->schib.scsw) & cmf_copy_block() 318 (!(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_SEC_STATUS))) cmf_copy_block()
|
H A D | css.h | 113 void css_update_ssd_info(struct subchannel *sch); 145 void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo);
|
H A D | device.h | 87 void io_subchannel_init_config(struct subchannel *sch);
|
H A D | chsc.h | 219 u16 sch; member in struct:chsc_pnso_area
|
H A D | chp.c | 72 * @sch: subchannel 77 u8 chp_get_sch_opm(struct subchannel *sch) chp_get_sch_opm() argument 87 chpid.id = sch->schib.pmcw.chpid[i]; chp_get_sch_opm()
|
H A D | qdio.h | 76 /* flags for st qdio sch data */
|
H A D | qdio_setup.c | 321 (ssqd->qdio_ssqd.sch != schid->sch_no)) qdio_setup_get_ssqd()
|
/linux-4.4.14/net/sched/ |
H A D | sch_fifo.c | 22 static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch) bfifo_enqueue() argument 24 if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit)) bfifo_enqueue() 25 return qdisc_enqueue_tail(skb, sch); bfifo_enqueue() 27 return qdisc_reshape_fail(skb, sch); bfifo_enqueue() 30 static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch) pfifo_enqueue() argument 32 if (likely(skb_queue_len(&sch->q) < sch->limit)) pfifo_enqueue() 33 return qdisc_enqueue_tail(skb, sch); pfifo_enqueue() 35 return qdisc_reshape_fail(skb, sch); pfifo_enqueue() 38 static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch) pfifo_tail_enqueue() argument 40 if (likely(skb_queue_len(&sch->q) < sch->limit)) pfifo_tail_enqueue() 41 return qdisc_enqueue_tail(skb, sch); pfifo_tail_enqueue() 44 __qdisc_queue_drop_head(sch, &sch->q); pfifo_tail_enqueue() 45 qdisc_qstats_drop(sch); pfifo_tail_enqueue() 46 qdisc_enqueue_tail(skb, sch); pfifo_tail_enqueue() 51 static int fifo_init(struct Qdisc *sch, struct nlattr *opt) fifo_init() argument 54 bool is_bfifo = sch->ops == &bfifo_qdisc_ops; fifo_init() 57 u32 limit = qdisc_dev(sch)->tx_queue_len; fifo_init() 60 limit *= psched_mtu(qdisc_dev(sch)); fifo_init() 62 sch->limit = limit; fifo_init() 69 sch->limit = ctl->limit; fifo_init() 73 bypass = sch->limit >= psched_mtu(qdisc_dev(sch)); fifo_init() 75 bypass = sch->limit >= 1; fifo_init() 78 sch->flags |= TCQ_F_CAN_BYPASS; fifo_init() 80 sch->flags &= ~TCQ_F_CAN_BYPASS; fifo_init() 84 static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb) fifo_dump() argument 86 struct tc_fifo_qopt opt = { .limit = sch->limit }; fifo_dump() 163 struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops, fifo_create_dflt() argument 169 q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1)); fifo_create_dflt()
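bfifo and pfifo above differ only in the admission test: bfifo bounds the queued bytes, pfifo bounds the packet count, and the head-drop variant (pfifo_tail_enqueue) evicts the oldest packet to admit the new one instead of rejecting it. Note also that fifo_init derives a default limit from the device's tx_queue_len, scaled by the MTU for the byte-based flavor. A userspace sketch of the three policies over a reduced queue struct (field names illustrative):

    #include <stdbool.h>

    struct fifo { unsigned backlog, qlen, limit; };

    /* bfifo: admit while queued bytes stay within the limit */
    bool bfifo_admit(const struct fifo *q, unsigned pkt_len)
    {
            return q->backlog + pkt_len <= q->limit;
    }

    /* pfifo: admit while the packet count stays within the limit */
    bool pfifo_admit(const struct fifo *q)
    {
            return q->qlen < q->limit;
    }

    /* head-drop pfifo: always admit, evicting the head when full */
    bool pfifo_headdrop_admit(const struct fifo *q, bool *evict_head)
    {
            *evict_head = q->qlen >= q->limit;
            return true;
    }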
|
H A D | sch_dsmark.c | 61 static int dsmark_graft(struct Qdisc *sch, unsigned long arg, dsmark_graft() argument 64 struct dsmark_qdisc_data *p = qdisc_priv(sch); dsmark_graft() 66 pr_debug("%s(sch %p,[qdisc %p],new %p,old %p)\n", dsmark_graft() 67 __func__, sch, p, new, old); dsmark_graft() 70 new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, dsmark_graft() 71 sch->handle); dsmark_graft() 76 *old = qdisc_replace(sch, new, &p->q); dsmark_graft() 80 static struct Qdisc *dsmark_leaf(struct Qdisc *sch, unsigned long arg) dsmark_leaf() argument 82 struct dsmark_qdisc_data *p = qdisc_priv(sch); dsmark_leaf() 86 static unsigned long dsmark_get(struct Qdisc *sch, u32 classid) dsmark_get() argument 88 pr_debug("%s(sch %p,[qdisc %p],classid %x)\n", dsmark_get() 89 __func__, sch, qdisc_priv(sch), classid); dsmark_get() 94 static unsigned long dsmark_bind_filter(struct Qdisc *sch, dsmark_bind_filter() argument 97 return dsmark_get(sch, classid); dsmark_bind_filter() 100 static void dsmark_put(struct Qdisc *sch, unsigned long cl) dsmark_put() argument 112 static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent, dsmark_change() argument 115 struct dsmark_qdisc_data *p = qdisc_priv(sch); dsmark_change() 120 pr_debug("%s(sch %p,[qdisc %p],classid %x,parent %x), arg 0x%lx\n", dsmark_change() 121 __func__, sch, p, classid, parent, *arg); dsmark_change() 147 static int dsmark_delete(struct Qdisc *sch, unsigned long arg) dsmark_delete() argument 149 struct dsmark_qdisc_data *p = qdisc_priv(sch); dsmark_delete() 160 static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker) dsmark_walk() argument 162 struct dsmark_qdisc_data *p = qdisc_priv(sch); dsmark_walk() 165 pr_debug("%s(sch %p,[qdisc %p],walker %p)\n", dsmark_walk() 166 __func__, sch, p, walker); dsmark_walk() 175 if (walker->fn(sch, i + 1, walker) < 0) { dsmark_walk() 185 static inline struct tcf_proto __rcu **dsmark_find_tcf(struct Qdisc *sch, dsmark_find_tcf() argument 188 struct dsmark_qdisc_data *p = qdisc_priv(sch); dsmark_find_tcf() 194 static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch) dsmark_enqueue() argument 196 struct dsmark_qdisc_data *p = qdisc_priv(sch); dsmark_enqueue() 199 pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p); dsmark_enqueue() 224 if (TC_H_MAJ(skb->priority) == sch->handle) dsmark_enqueue() 257 qdisc_qstats_drop(sch); dsmark_enqueue() 261 qdisc_qstats_backlog_inc(sch, skb); dsmark_enqueue() 262 sch->q.qlen++; dsmark_enqueue() 267 qdisc_drop(skb, sch); dsmark_enqueue() 271 static struct sk_buff *dsmark_dequeue(struct Qdisc *sch) dsmark_dequeue() argument 273 struct dsmark_qdisc_data *p = qdisc_priv(sch); dsmark_dequeue() 277 pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p); dsmark_dequeue() 283 qdisc_bstats_update(sch, skb); dsmark_dequeue() 284 qdisc_qstats_backlog_dec(sch, skb); dsmark_dequeue() 285 sch->q.qlen--; dsmark_dequeue() 314 static struct sk_buff *dsmark_peek(struct Qdisc *sch) dsmark_peek() argument 316 struct dsmark_qdisc_data *p = qdisc_priv(sch); dsmark_peek() 318 pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p); dsmark_peek() 323 static unsigned int dsmark_drop(struct Qdisc *sch) dsmark_drop() argument 325 struct dsmark_qdisc_data *p = qdisc_priv(sch); dsmark_drop() 328 pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p); dsmark_drop() 335 sch->q.qlen--; dsmark_drop() 340 static int dsmark_init(struct Qdisc *sch, struct nlattr *opt) dsmark_init() argument 342 struct dsmark_qdisc_data *p = qdisc_priv(sch); dsmark_init() 349 
pr_debug("%s(sch %p,[qdisc %p],opt %p)\n", __func__, sch, p, opt); dsmark_init() 383 p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle); dsmark_init() 394 static void dsmark_reset(struct Qdisc *sch) dsmark_reset() argument 396 struct dsmark_qdisc_data *p = qdisc_priv(sch); dsmark_reset() 398 pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p); dsmark_reset() 400 sch->qstats.backlog = 0; dsmark_reset() 401 sch->q.qlen = 0; dsmark_reset() 404 static void dsmark_destroy(struct Qdisc *sch) dsmark_destroy() argument 406 struct dsmark_qdisc_data *p = qdisc_priv(sch); dsmark_destroy() 408 pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p); dsmark_destroy() 416 static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl, dsmark_dump_class() argument 419 struct dsmark_qdisc_data *p = qdisc_priv(sch); dsmark_dump_class() 422 pr_debug("%s(sch %p,[qdisc %p],class %ld\n", __func__, sch, p, cl); dsmark_dump_class() 427 tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1); dsmark_dump_class() 444 static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb) dsmark_dump() argument 446 struct dsmark_qdisc_data *p = qdisc_priv(sch); dsmark_dump()
|
H A D | sch_multiq.c | 40 multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) multiq_classify() argument 42 struct multiq_sched_data *q = qdisc_priv(sch); multiq_classify() 68 multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch) multiq_enqueue() argument 73 qdisc = multiq_classify(skb, sch, &ret); multiq_enqueue() 78 qdisc_qstats_drop(sch); multiq_enqueue() 86 sch->q.qlen++; multiq_enqueue() 90 qdisc_qstats_drop(sch); multiq_enqueue() 94 static struct sk_buff *multiq_dequeue(struct Qdisc *sch) multiq_dequeue() argument 96 struct multiq_sched_data *q = qdisc_priv(sch); multiq_dequeue() 111 netdev_get_tx_queue(qdisc_dev(sch), q->curband))) { multiq_dequeue() 115 qdisc_bstats_update(sch, skb); multiq_dequeue() 116 sch->q.qlen--; multiq_dequeue() 125 static struct sk_buff *multiq_peek(struct Qdisc *sch) multiq_peek() argument 127 struct multiq_sched_data *q = qdisc_priv(sch); multiq_peek() 143 netdev_get_tx_queue(qdisc_dev(sch), curband))) { multiq_peek() 154 static unsigned int multiq_drop(struct Qdisc *sch) multiq_drop() argument 156 struct multiq_sched_data *q = qdisc_priv(sch); multiq_drop() 166 sch->q.qlen--; multiq_drop() 176 multiq_reset(struct Qdisc *sch) multiq_reset() argument 179 struct multiq_sched_data *q = qdisc_priv(sch); multiq_reset() 183 sch->q.qlen = 0; multiq_reset() 188 multiq_destroy(struct Qdisc *sch) multiq_destroy() argument 191 struct multiq_sched_data *q = qdisc_priv(sch); multiq_destroy() 200 static int multiq_tune(struct Qdisc *sch, struct nlattr *opt) multiq_tune() argument 202 struct multiq_sched_data *q = qdisc_priv(sch); multiq_tune() 206 if (!netif_is_multiqueue(qdisc_dev(sch))) multiq_tune() 213 qopt->bands = qdisc_dev(sch)->real_num_tx_queues; multiq_tune() 215 sch_tree_lock(sch); multiq_tune() 227 sch_tree_unlock(sch); multiq_tune() 232 child = qdisc_create_dflt(sch->dev_queue, multiq_tune() 234 TC_H_MAKE(sch->handle, multiq_tune() 237 sch_tree_lock(sch); multiq_tune() 247 sch_tree_unlock(sch); multiq_tune() 254 static int multiq_init(struct Qdisc *sch, struct nlattr *opt) multiq_init() argument 256 struct multiq_sched_data *q = qdisc_priv(sch); multiq_init() 264 q->max_bands = qdisc_dev(sch)->num_tx_queues; multiq_init() 272 err = multiq_tune(sch, opt); multiq_init() 280 static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb) multiq_dump() argument 282 struct multiq_sched_data *q = qdisc_priv(sch); multiq_dump() 299 static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, multiq_graft() argument 302 struct multiq_sched_data *q = qdisc_priv(sch); multiq_graft() 308 *old = qdisc_replace(sch, new, &q->queues[band]); multiq_graft() 313 multiq_leaf(struct Qdisc *sch, unsigned long arg) multiq_leaf() argument 315 struct multiq_sched_data *q = qdisc_priv(sch); multiq_leaf() 321 static unsigned long multiq_get(struct Qdisc *sch, u32 classid) multiq_get() argument 323 struct multiq_sched_data *q = qdisc_priv(sch); multiq_get() 331 static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent, multiq_bind() argument 334 return multiq_get(sch, classid); multiq_bind() 342 static int multiq_dump_class(struct Qdisc *sch, unsigned long cl, multiq_dump_class() argument 345 struct multiq_sched_data *q = qdisc_priv(sch); multiq_dump_class() 352 static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl, multiq_dump_class_stats() argument 355 struct multiq_sched_data *q = qdisc_priv(sch); multiq_dump_class_stats() 366 static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg) multiq_walk() 
argument 368 struct multiq_sched_data *q = qdisc_priv(sch); multiq_walk() 379 if (arg->fn(sch, band + 1, arg) < 0) { multiq_walk() 387 static struct tcf_proto __rcu **multiq_find_tcf(struct Qdisc *sch, multiq_find_tcf() argument 390 struct multiq_sched_data *q = qdisc_priv(sch); multiq_find_tcf()
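multiq_dequeue is a round robin over the bands with one twist visible above: a band is skipped when the hardware tx queue backing it is stopped (netdev_get_tx_queue()/netif_xmit_stopped), so one stalled NIC queue cannot head-of-line block the rest. Sketch, with the queue state reduced to a flag:

    struct band { int stopped; int qlen; };

    /* advance the round robin; return the next serviceable band or -1 */
    int multiq_next_band(struct band *b, int nbands, int *curband)
    {
            for (int i = 0; i < nbands; i++) {
                    *curband = (*curband + 1) % nbands;
                    if (!b[*curband].stopped && b[*curband].qlen > 0)
                            return *curband;
            }
            return -1;
    }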
|
H A D | sch_prio.c | 34 prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) prio_classify() argument 36 struct prio_sched_data *q = qdisc_priv(sch); prio_classify() 43 if (TC_H_MAJ(skb->priority) != sch->handle) { prio_classify() 70 prio_enqueue(struct sk_buff *skb, struct Qdisc *sch) prio_enqueue() argument 75 qdisc = prio_classify(skb, sch, &ret); prio_enqueue() 80 qdisc_qstats_drop(sch); prio_enqueue() 88 sch->q.qlen++; prio_enqueue() 92 qdisc_qstats_drop(sch); prio_enqueue() 96 static struct sk_buff *prio_peek(struct Qdisc *sch) prio_peek() argument 98 struct prio_sched_data *q = qdisc_priv(sch); prio_peek() 110 static struct sk_buff *prio_dequeue(struct Qdisc *sch) prio_dequeue() argument 112 struct prio_sched_data *q = qdisc_priv(sch); prio_dequeue() 119 qdisc_bstats_update(sch, skb); prio_dequeue() 120 sch->q.qlen--; prio_dequeue() 128 static unsigned int prio_drop(struct Qdisc *sch) prio_drop() argument 130 struct prio_sched_data *q = qdisc_priv(sch); prio_drop() 138 sch->q.qlen--; prio_drop() 147 prio_reset(struct Qdisc *sch) prio_reset() argument 150 struct prio_sched_data *q = qdisc_priv(sch); prio_reset() 154 sch->q.qlen = 0; prio_reset() 158 prio_destroy(struct Qdisc *sch) prio_destroy() argument 161 struct prio_sched_data *q = qdisc_priv(sch); prio_destroy() 168 static int prio_tune(struct Qdisc *sch, struct nlattr *opt) prio_tune() argument 170 struct prio_sched_data *q = qdisc_priv(sch); prio_tune() 186 sch_tree_lock(sch); prio_tune() 198 sch_tree_unlock(sch); prio_tune() 204 child = qdisc_create_dflt(sch->dev_queue, prio_tune() 206 TC_H_MAKE(sch->handle, i + 1)); prio_tune() 208 sch_tree_lock(sch); prio_tune() 218 sch_tree_unlock(sch); prio_tune() 225 static int prio_init(struct Qdisc *sch, struct nlattr *opt) prio_init() argument 227 struct prio_sched_data *q = qdisc_priv(sch); prio_init() 238 if ((err = prio_tune(sch, opt)) != 0) prio_init() 244 static int prio_dump(struct Qdisc *sch, struct sk_buff *skb) prio_dump() argument 246 struct prio_sched_data *q = qdisc_priv(sch); prio_dump() 263 static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, prio_graft() argument 266 struct prio_sched_data *q = qdisc_priv(sch); prio_graft() 272 *old = qdisc_replace(sch, new, &q->queues[band]); prio_graft() 277 prio_leaf(struct Qdisc *sch, unsigned long arg) prio_leaf() argument 279 struct prio_sched_data *q = qdisc_priv(sch); prio_leaf() 285 static unsigned long prio_get(struct Qdisc *sch, u32 classid) prio_get() argument 287 struct prio_sched_data *q = qdisc_priv(sch); prio_get() 295 static unsigned long prio_bind(struct Qdisc *sch, unsigned long parent, u32 classid) prio_bind() argument 297 return prio_get(sch, classid); prio_bind() 305 static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, prio_dump_class() argument 308 struct prio_sched_data *q = qdisc_priv(sch); prio_dump_class() 315 static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl, prio_dump_class_stats() argument 318 struct prio_sched_data *q = qdisc_priv(sch); prio_dump_class_stats() 329 static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg) prio_walk() argument 331 struct prio_sched_data *q = qdisc_priv(sch); prio_walk() 342 if (arg->fn(sch, prio + 1, arg) < 0) { prio_walk() 350 static struct tcf_proto __rcu **prio_find_tcf(struct Qdisc *sch, prio_find_tcf() argument 353 struct prio_sched_data *q = qdisc_priv(sch); prio_find_tcf()
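prio is strict priority: classification maps skb->priority through the qdisc's prio2band table (or attached tc filters), and dequeue simply scans the bands in ascending order and takes the first packet found, so band 0 can starve higher-numbered bands by design. Minimal sketch of the dequeue decision:

    /* dequeue services the lowest-numbered backlogged band */
    int prio_pick_band(const int *band_qlen, int bands)
    {
            for (int i = 0; i < bands; i++)
                    if (band_qlen[i] > 0)
                            return i;
            return -1;      /* all bands empty */
    }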
|
H A D | sch_mq.c | 25 static void mq_destroy(struct Qdisc *sch) mq_destroy() argument 27 struct net_device *dev = qdisc_dev(sch); mq_destroy() 28 struct mq_sched *priv = qdisc_priv(sch); mq_destroy() 38 static int mq_init(struct Qdisc *sch, struct nlattr *opt) mq_init() argument 40 struct net_device *dev = qdisc_dev(sch); mq_init() 41 struct mq_sched *priv = qdisc_priv(sch); mq_init() 46 if (sch->parent != TC_H_ROOT) mq_init() 61 TC_H_MAKE(TC_H_MAJ(sch->handle), mq_init() 69 sch->flags |= TCQ_F_MQROOT; mq_init() 73 mq_destroy(sch); mq_init() 77 static void mq_attach(struct Qdisc *sch) mq_attach() argument 79 struct net_device *dev = qdisc_dev(sch); mq_attach() 80 struct mq_sched *priv = qdisc_priv(sch); mq_attach() 99 static int mq_dump(struct Qdisc *sch, struct sk_buff *skb) mq_dump() argument 101 struct net_device *dev = qdisc_dev(sch); mq_dump() 105 sch->q.qlen = 0; mq_dump() 106 memset(&sch->bstats, 0, sizeof(sch->bstats)); mq_dump() 107 memset(&sch->qstats, 0, sizeof(sch->qstats)); mq_dump() 112 sch->q.qlen += qdisc->q.qlen; mq_dump() 113 sch->bstats.bytes += qdisc->bstats.bytes; mq_dump() 114 sch->bstats.packets += qdisc->bstats.packets; mq_dump() 115 sch->qstats.backlog += qdisc->qstats.backlog; mq_dump() 116 sch->qstats.drops += qdisc->qstats.drops; mq_dump() 117 sch->qstats.requeues += qdisc->qstats.requeues; mq_dump() 118 sch->qstats.overlimits += qdisc->qstats.overlimits; mq_dump() 124 static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl) mq_queue_get() argument 126 struct net_device *dev = qdisc_dev(sch); mq_queue_get() 134 static struct netdev_queue *mq_select_queue(struct Qdisc *sch, mq_select_queue() argument 138 struct netdev_queue *dev_queue = mq_queue_get(sch, ntx); mq_select_queue() 141 struct net_device *dev = qdisc_dev(sch); mq_select_queue() 148 static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new, mq_graft() argument 151 struct netdev_queue *dev_queue = mq_queue_get(sch, cl); mq_graft() 152 struct net_device *dev = qdisc_dev(sch); mq_graft() 165 static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl) mq_leaf() argument 167 struct netdev_queue *dev_queue = mq_queue_get(sch, cl); mq_leaf() 172 static unsigned long mq_get(struct Qdisc *sch, u32 classid) mq_get() argument 176 if (!mq_queue_get(sch, ntx)) mq_get() 181 static void mq_put(struct Qdisc *sch, unsigned long cl) mq_put() argument 185 static int mq_dump_class(struct Qdisc *sch, unsigned long cl, mq_dump_class() argument 188 struct netdev_queue *dev_queue = mq_queue_get(sch, cl); mq_dump_class() 196 static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl, mq_dump_class_stats() argument 199 struct netdev_queue *dev_queue = mq_queue_get(sch, cl); mq_dump_class_stats() 201 sch = dev_queue->qdisc_sleeping; mq_dump_class_stats() 202 if (gnet_stats_copy_basic(d, NULL, &sch->bstats) < 0 || mq_dump_class_stats() 203 gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0) mq_dump_class_stats() 208 static void mq_walk(struct Qdisc *sch, struct qdisc_walker *arg) mq_walk() argument 210 struct net_device *dev = qdisc_dev(sch); mq_walk() 218 if (arg->fn(sch, ntx + 1, arg) < 0) { mq_walk()
|
H A D | sch_red.c | 59 static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch) red_enqueue() argument 61 struct red_sched_data *q = qdisc_priv(sch); red_enqueue() 77 qdisc_qstats_overlimit(sch); red_enqueue() 87 qdisc_qstats_overlimit(sch); red_enqueue() 100 sch->q.qlen++; red_enqueue() 103 qdisc_qstats_drop(sch); red_enqueue() 108 qdisc_drop(skb, sch); red_enqueue() 112 static struct sk_buff *red_dequeue(struct Qdisc *sch) red_dequeue() argument 115 struct red_sched_data *q = qdisc_priv(sch); red_dequeue() 120 qdisc_bstats_update(sch, skb); red_dequeue() 121 sch->q.qlen--; red_dequeue() 129 static struct sk_buff *red_peek(struct Qdisc *sch) red_peek() argument 131 struct red_sched_data *q = qdisc_priv(sch); red_peek() 137 static unsigned int red_drop(struct Qdisc *sch) red_drop() argument 139 struct red_sched_data *q = qdisc_priv(sch); red_drop() 145 qdisc_qstats_drop(sch); red_drop() 146 sch->q.qlen--; red_drop() 156 static void red_reset(struct Qdisc *sch) red_reset() argument 158 struct red_sched_data *q = qdisc_priv(sch); red_reset() 161 sch->q.qlen = 0; red_reset() 165 static void red_destroy(struct Qdisc *sch) red_destroy() argument 167 struct red_sched_data *q = qdisc_priv(sch); red_destroy() 179 static int red_change(struct Qdisc *sch, struct nlattr *opt) red_change() argument 181 struct red_sched_data *q = qdisc_priv(sch); red_change() 204 child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit); red_change() 209 sch_tree_lock(sch); red_change() 233 sch_tree_unlock(sch); red_change() 239 struct Qdisc *sch = (struct Qdisc *)arg; red_adaptative_timer() local 240 struct red_sched_data *q = qdisc_priv(sch); red_adaptative_timer() 241 spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch)); red_adaptative_timer() 249 static int red_init(struct Qdisc *sch, struct nlattr *opt) red_init() argument 251 struct red_sched_data *q = qdisc_priv(sch); red_init() 254 setup_timer(&q->adapt_timer, red_adaptative_timer, (unsigned long)sch); red_init() 255 return red_change(sch, opt); red_init() 258 static int red_dump(struct Qdisc *sch, struct sk_buff *skb) red_dump() argument 260 struct red_sched_data *q = qdisc_priv(sch); red_dump() 272 sch->qstats.backlog = q->qdisc->qstats.backlog; red_dump() 286 static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d) red_dump_stats() argument 288 struct red_sched_data *q = qdisc_priv(sch); red_dump_stats() 299 static int red_dump_class(struct Qdisc *sch, unsigned long cl, red_dump_class() argument 302 struct red_sched_data *q = qdisc_priv(sch); red_dump_class() 309 static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, red_graft() argument 312 struct red_sched_data *q = qdisc_priv(sch); red_graft() 317 *old = qdisc_replace(sch, new, &q->qdisc); red_graft() 321 static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg) red_leaf() argument 323 struct red_sched_data *q = qdisc_priv(sch); red_leaf() 327 static unsigned long red_get(struct Qdisc *sch, u32 classid) red_get() argument 332 static void red_put(struct Qdisc *sch, unsigned long arg) red_put() argument 336 static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker) red_walk() argument 340 if (walker->fn(sch, 1, walker) < 0) { red_walk()
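RED tracks an EWMA of the queue length and drops (or ECN-marks) with a probability that rises linearly between two thresholds, dropping everything once the average passes the upper one; the arithmetic lives in include/net/red.h. A simplified floating-point model, ignoring the kernel's fixed-point scaling and its count-based probability correction:

    #include <stdlib.h>

    struct red { double qavg, wq, qth_min, qth_max, max_p; };

    /* fold the instantaneous backlog into the moving average */
    void red_update(struct red *r, double backlog)
    {
            r->qavg = (1.0 - r->wq) * r->qavg + r->wq * backlog;
    }

    /* 0 = enqueue, 1 = probabilistic drop/mark, 2 = hard drop */
    int red_action(const struct red *r)
    {
            if (r->qavg < r->qth_min)
                    return 0;
            if (r->qavg >= r->qth_max)
                    return 2;
            double p = r->max_p * (r->qavg - r->qth_min)
                                / (r->qth_max - r->qth_min);
            return (rand() / (double)RAND_MAX) < p ? 1 : 0;
    }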
|
H A D | sch_drr.c | 42 static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid) drr_find_class() argument 44 struct drr_sched *q = qdisc_priv(sch); drr_find_class() 66 static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid, drr_change_class() argument 69 struct drr_sched *q = qdisc_priv(sch); drr_change_class() 88 quantum = psched_mtu(qdisc_dev(sch)); drr_change_class() 94 qdisc_root_sleeping_lock(sch), drr_change_class() 100 sch_tree_lock(sch); drr_change_class() 103 sch_tree_unlock(sch); drr_change_class() 115 cl->qdisc = qdisc_create_dflt(sch->dev_queue, drr_change_class() 122 qdisc_root_sleeping_lock(sch), drr_change_class() 131 sch_tree_lock(sch); drr_change_class() 133 sch_tree_unlock(sch); drr_change_class() 135 qdisc_class_hash_grow(sch, &q->clhash); drr_change_class() 141 static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl) drr_destroy_class() argument 148 static int drr_delete_class(struct Qdisc *sch, unsigned long arg) drr_delete_class() argument 150 struct drr_sched *q = qdisc_priv(sch); drr_delete_class() 156 sch_tree_lock(sch); drr_delete_class() 167 sch_tree_unlock(sch); drr_delete_class() 171 static unsigned long drr_get_class(struct Qdisc *sch, u32 classid) drr_get_class() argument 173 struct drr_class *cl = drr_find_class(sch, classid); drr_get_class() 181 static void drr_put_class(struct Qdisc *sch, unsigned long arg) drr_put_class() argument 186 drr_destroy_class(sch, cl); drr_put_class() 189 static struct tcf_proto __rcu **drr_tcf_chain(struct Qdisc *sch, drr_tcf_chain() argument 192 struct drr_sched *q = qdisc_priv(sch); drr_tcf_chain() 200 static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent, drr_bind_tcf() argument 203 struct drr_class *cl = drr_find_class(sch, classid); drr_bind_tcf() 211 static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg) drr_unbind_tcf() argument 218 static int drr_graft_class(struct Qdisc *sch, unsigned long arg, drr_graft_class() argument 224 new = qdisc_create_dflt(sch->dev_queue, drr_graft_class() 230 *old = qdisc_replace(sch, new, &cl->qdisc); drr_graft_class() 234 static struct Qdisc *drr_class_leaf(struct Qdisc *sch, unsigned long arg) drr_class_leaf() argument 249 static int drr_dump_class(struct Qdisc *sch, unsigned long arg, drr_dump_class() argument 271 static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg, drr_dump_class_stats() argument 290 static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg) drr_walk() argument 292 struct drr_sched *q = qdisc_priv(sch); drr_walk() 305 if (arg->fn(sch, (unsigned long)cl, arg) < 0) { drr_walk() 314 static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch, drr_classify() argument 317 struct drr_sched *q = qdisc_priv(sch); drr_classify() 323 if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) { drr_classify() 324 cl = drr_find_class(sch, skb->priority); drr_classify() 344 cl = drr_find_class(sch, res.classid); drr_classify() 350 static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch) drr_enqueue() argument 352 struct drr_sched *q = qdisc_priv(sch); drr_enqueue() 356 cl = drr_classify(skb, sch, &err); drr_enqueue() 359 qdisc_qstats_drop(sch); drr_enqueue() 368 qdisc_qstats_drop(sch); drr_enqueue() 378 sch->q.qlen++; drr_enqueue() 382 static struct sk_buff *drr_dequeue(struct Qdisc *sch) drr_dequeue() argument 384 struct drr_sched *q = qdisc_priv(sch); drr_dequeue() 407 qdisc_bstats_update(sch, skb); drr_dequeue() 408 sch->q.qlen--; drr_dequeue() 419 static unsigned int 
drr_drop(struct Qdisc *sch) drr_drop() argument 421 struct drr_sched *q = qdisc_priv(sch); drr_drop() 429 sch->q.qlen--; drr_drop() 439 static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt) drr_init_qdisc() argument 441 struct drr_sched *q = qdisc_priv(sch); drr_init_qdisc() 451 static void drr_reset_qdisc(struct Qdisc *sch) drr_reset_qdisc() argument 453 struct drr_sched *q = qdisc_priv(sch); drr_reset_qdisc() 464 sch->q.qlen = 0; drr_reset_qdisc() 467 static void drr_destroy_qdisc(struct Qdisc *sch) drr_destroy_qdisc() argument 469 struct drr_sched *q = qdisc_priv(sch); drr_destroy_qdisc() 479 drr_destroy_class(sch, cl); drr_destroy_qdisc()
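DRR gives each class a byte quantum per round: when a class reaches the head of the active list its deficit grows by the quantum, it may transmit head packets that fit within the deficit, and otherwise the scheduler moves on. A compact model of the dequeue decision, assuming (as the qdisc's default quantum = device MTU guarantees) that the quantum covers any single packet:

    struct drr_class {
            unsigned quantum, deficit;
            unsigned head_len;      /* head packet length, 0 if empty */
    };

    /* one scheduling decision over n classes; returns the index of the
     * class allowed to send its head packet, or -1 if all are empty */
    int drr_step(struct drr_class *cls, int n, int *cur)
    {
            for (int spins = 0; spins < 2 * n; spins++) {
                    struct drr_class *c = &cls[*cur];
                    if (c->head_len && c->head_len <= c->deficit) {
                            c->deficit -= c->head_len;
                            return *cur;
                    }
                    if (c->head_len)
                            c->deficit += c->quantum;   /* new round */
                    *cur = (*cur + 1) % n;
            }
            return -1;
    }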
|
H A D | sch_codel.c | 67 static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch) dequeue() argument 69 struct sk_buff *skb = __skb_dequeue(&sch->q); dequeue() 75 static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch) codel_qdisc_dequeue() argument 77 struct codel_sched_data *q = qdisc_priv(sch); codel_qdisc_dequeue() 80 skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue); codel_qdisc_dequeue() 85 if (q->stats.drop_count && sch->q.qlen) { codel_qdisc_dequeue() 86 qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len); codel_qdisc_dequeue() 91 qdisc_bstats_update(sch, skb); codel_qdisc_dequeue() 95 static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch) codel_qdisc_enqueue() argument 99 if (likely(qdisc_qlen(sch) < sch->limit)) { codel_qdisc_enqueue() 101 return qdisc_enqueue_tail(skb, sch); codel_qdisc_enqueue() 103 q = qdisc_priv(sch); codel_qdisc_enqueue() 105 return qdisc_drop(skb, sch); codel_qdisc_enqueue() 116 static int codel_change(struct Qdisc *sch, struct nlattr *opt) codel_change() argument 118 struct codel_sched_data *q = qdisc_priv(sch); codel_change() 130 sch_tree_lock(sch); codel_change() 151 sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]); codel_change() 156 qlen = sch->q.qlen; codel_change() 157 while (sch->q.qlen > sch->limit) { codel_change() 158 struct sk_buff *skb = __skb_dequeue(&sch->q); codel_change() 161 qdisc_qstats_backlog_dec(sch, skb); codel_change() 162 qdisc_drop(skb, sch); codel_change() 164 qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped); codel_change() 166 sch_tree_unlock(sch); codel_change() 170 static int codel_init(struct Qdisc *sch, struct nlattr *opt) codel_init() argument 172 struct codel_sched_data *q = qdisc_priv(sch); codel_init() 174 sch->limit = DEFAULT_CODEL_LIMIT; codel_init() 176 codel_params_init(&q->params, sch); codel_init() 181 int err = codel_change(sch, opt); codel_init() 187 if (sch->limit >= 1) codel_init() 188 sch->flags |= TCQ_F_CAN_BYPASS; codel_init() 190 sch->flags &= ~TCQ_F_CAN_BYPASS; codel_init() 195 static int codel_dump(struct Qdisc *sch, struct sk_buff *skb) codel_dump() argument 197 struct codel_sched_data *q = qdisc_priv(sch); codel_dump() 207 sch->limit) || codel_dump() 224 static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d) codel_dump_stats() argument 226 const struct codel_sched_data *q = qdisc_priv(sch); codel_dump_stats() 250 static void codel_reset(struct Qdisc *sch) codel_reset() argument 252 struct codel_sched_data *q = qdisc_priv(sch); codel_reset() 254 qdisc_reset_queue(sch); codel_reset()
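CoDel works on sojourn time rather than queue length: each packet is timestamped on enqueue, and once the delay stays above target for a full interval the qdisc enters a dropping state in which successive drops are spaced by the control law next = t + interval/sqrt(count). Just that law, in floating point (the kernel uses fixed point with a cached reciprocal square root):

    #include <math.h>
    #include <stdint.h>

    typedef uint64_t ns_t;

    /* time of the next drop after a drop at t, count >= 1 drops so far */
    ns_t codel_control_law(ns_t t, ns_t interval, uint32_t count)
    {
            return t + (ns_t)(interval / sqrt((double)count));
    }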
|
H A D | sch_fq_codel.c | 59 u32 quantum; /* psched_mtu(qdisc_dev(sch)); */ 77 static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch, fq_codel_classify() argument 80 struct fq_codel_sched_data *q = qdisc_priv(sch); fq_codel_classify() 85 if (TC_H_MAJ(skb->priority) == sch->handle && fq_codel_classify() 136 static unsigned int fq_codel_drop(struct Qdisc *sch) fq_codel_drop() argument 138 struct fq_codel_sched_data *q = qdisc_priv(sch); fq_codel_drop() 158 sch->q.qlen--; fq_codel_drop() 159 qdisc_qstats_drop(sch); fq_codel_drop() 160 qdisc_qstats_backlog_dec(sch, skb); fq_codel_drop() 166 static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch) fq_codel_qdisc_drop() argument 170 prev_backlog = sch->qstats.backlog; fq_codel_qdisc_drop() 171 fq_codel_drop(sch); fq_codel_qdisc_drop() 172 return prev_backlog - sch->qstats.backlog; fq_codel_qdisc_drop() 175 static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) fq_codel_enqueue() argument 177 struct fq_codel_sched_data *q = qdisc_priv(sch); fq_codel_enqueue() 182 idx = fq_codel_classify(skb, sch, &ret); fq_codel_enqueue() 185 qdisc_qstats_drop(sch); fq_codel_enqueue() 195 qdisc_qstats_backlog_inc(sch, skb); fq_codel_enqueue() 203 if (++sch->q.qlen <= sch->limit) fq_codel_enqueue() 206 prev_backlog = sch->qstats.backlog; fq_codel_enqueue() 211 if (fq_codel_drop(sch) == idx) fq_codel_enqueue() 215 qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog); fq_codel_enqueue() 223 static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch) dequeue() argument 225 struct fq_codel_sched_data *q = qdisc_priv(sch); dequeue() 233 sch->q.qlen--; dequeue() 238 static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch) fq_codel_dequeue() argument 240 struct fq_codel_sched_data *q = qdisc_priv(sch); fq_codel_dequeue() 264 prev_backlog = sch->qstats.backlog; fq_codel_dequeue() 266 skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats, fq_codel_dequeue() 280 qdisc_bstats_update(sch, skb); fq_codel_dequeue() 285 if (q->cstats.drop_count && sch->q.qlen) { fq_codel_dequeue() 286 qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, fq_codel_dequeue() 294 static void fq_codel_reset(struct Qdisc *sch) fq_codel_reset() argument 296 struct fq_codel_sched_data *q = qdisc_priv(sch); fq_codel_reset() 307 qdisc_qstats_backlog_dec(sch, skb); fq_codel_reset() 315 sch->q.qlen = 0; fq_codel_reset() 328 static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt) fq_codel_change() argument 330 struct fq_codel_sched_data *q = qdisc_priv(sch); fq_codel_change() 348 sch_tree_lock(sch); fq_codel_change() 369 sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]); fq_codel_change() 377 while (sch->q.qlen > sch->limit) { fq_codel_change() 378 struct sk_buff *skb = fq_codel_dequeue(sch); fq_codel_change() 384 qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len); fq_codel_change() 388 sch_tree_unlock(sch); fq_codel_change() 406 static void fq_codel_destroy(struct Qdisc *sch) fq_codel_destroy() argument 408 struct fq_codel_sched_data *q = qdisc_priv(sch); fq_codel_destroy() 415 static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt) fq_codel_init() argument 417 struct fq_codel_sched_data *q = qdisc_priv(sch); fq_codel_init() 420 sch->limit = 10*1024; fq_codel_init() 422 q->quantum = psched_mtu(qdisc_dev(sch)); fq_codel_init() 426 codel_params_init(&q->cparams, sch); fq_codel_init() 431 int err = fq_codel_change(sch, opt); fq_codel_init() 453 if (sch->limit >= 1) fq_codel_init() 454 
sch->flags |= TCQ_F_CAN_BYPASS; fq_codel_init() 456 sch->flags &= ~TCQ_F_CAN_BYPASS; fq_codel_init() 460 static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb) fq_codel_dump() argument 462 struct fq_codel_sched_data *q = qdisc_priv(sch); fq_codel_dump() 472 sch->limit) || fq_codel_dump() 494 static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d) fq_codel_dump_stats() argument 496 struct fq_codel_sched_data *q = qdisc_priv(sch); fq_codel_dump_stats() 517 static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg) fq_codel_leaf() argument 522 static unsigned long fq_codel_get(struct Qdisc *sch, u32 classid) fq_codel_get() argument 527 static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent, fq_codel_bind() argument 531 sch->flags &= ~TCQ_F_CAN_BYPASS; fq_codel_bind() 539 static struct tcf_proto __rcu **fq_codel_find_tcf(struct Qdisc *sch, fq_codel_find_tcf() argument 542 struct fq_codel_sched_data *q = qdisc_priv(sch); fq_codel_find_tcf() 549 static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl, fq_codel_dump_class() argument 556 static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl, fq_codel_dump_class_stats() argument 559 struct fq_codel_sched_data *q = qdisc_priv(sch); fq_codel_dump_class_stats() 598 static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg) fq_codel_walk() argument 600 struct fq_codel_sched_data *q = qdisc_priv(sch); fq_codel_walk() 612 if (arg->fn(sch, i + 1, arg) < 0) { fq_codel_walk()
|
H A D | sch_ingress.c | 19 static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg) ingress_leaf() argument 24 static unsigned long ingress_get(struct Qdisc *sch, u32 classid) ingress_get() argument 29 static unsigned long ingress_bind_filter(struct Qdisc *sch, ingress_bind_filter() argument 32 return ingress_get(sch, classid); ingress_bind_filter() 35 static void ingress_put(struct Qdisc *sch, unsigned long cl) ingress_put() argument 39 static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker) ingress_walk() argument 43 static struct tcf_proto __rcu **ingress_find_tcf(struct Qdisc *sch, ingress_find_tcf() argument 46 struct net_device *dev = qdisc_dev(sch); ingress_find_tcf() 51 static int ingress_init(struct Qdisc *sch, struct nlattr *opt) ingress_init() argument 54 sch->flags |= TCQ_F_CPUSTATS; ingress_init() 59 static void ingress_destroy(struct Qdisc *sch) ingress_destroy() argument 61 struct net_device *dev = qdisc_dev(sch); ingress_destroy() 67 static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb) ingress_dump() argument
|
H A D | sch_mqprio.c | 27 static void mqprio_destroy(struct Qdisc *sch) mqprio_destroy() argument 29 struct net_device *dev = qdisc_dev(sch); mqprio_destroy() 30 struct mqprio_sched *priv = qdisc_priv(sch); mqprio_destroy() 92 static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) mqprio_init() argument 94 struct net_device *dev = qdisc_dev(sch); mqprio_init() 95 struct mqprio_sched *priv = qdisc_priv(sch); mqprio_init() 104 if (sch->parent != TC_H_ROOT) mqprio_init() 128 TC_H_MAKE(TC_H_MAJ(sch->handle), mqprio_init() 158 sch->flags |= TCQ_F_MQROOT; mqprio_init() 162 mqprio_destroy(sch); mqprio_init() 166 static void mqprio_attach(struct Qdisc *sch) mqprio_attach() argument 168 struct net_device *dev = qdisc_dev(sch); mqprio_attach() 169 struct mqprio_sched *priv = qdisc_priv(sch); mqprio_attach() 186 static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch, mqprio_queue_get() argument 189 struct net_device *dev = qdisc_dev(sch); mqprio_queue_get() 197 static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new, mqprio_graft() argument 200 struct net_device *dev = qdisc_dev(sch); mqprio_graft() 201 struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl); mqprio_graft() 220 static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb) mqprio_dump() argument 222 struct net_device *dev = qdisc_dev(sch); mqprio_dump() 223 struct mqprio_sched *priv = qdisc_priv(sch); mqprio_dump() 229 sch->q.qlen = 0; mqprio_dump() 230 memset(&sch->bstats, 0, sizeof(sch->bstats)); mqprio_dump() 231 memset(&sch->qstats, 0, sizeof(sch->qstats)); mqprio_dump() 236 sch->q.qlen += qdisc->q.qlen; mqprio_dump() 237 sch->bstats.bytes += qdisc->bstats.bytes; mqprio_dump() 238 sch->bstats.packets += qdisc->bstats.packets; mqprio_dump() 239 sch->qstats.backlog += qdisc->qstats.backlog; mqprio_dump() 240 sch->qstats.drops += qdisc->qstats.drops; mqprio_dump() 241 sch->qstats.requeues += qdisc->qstats.requeues; mqprio_dump() 242 sch->qstats.overlimits += qdisc->qstats.overlimits; mqprio_dump() 264 static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl) mqprio_leaf() argument 266 struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl); mqprio_leaf() 274 static unsigned long mqprio_get(struct Qdisc *sch, u32 classid) mqprio_get() argument 276 struct net_device *dev = qdisc_dev(sch); mqprio_get() 284 static void mqprio_put(struct Qdisc *sch, unsigned long cl) mqprio_put() argument 288 static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl, mqprio_dump_class() argument 291 struct net_device *dev = qdisc_dev(sch); mqprio_dump_class() 300 dev_queue = mqprio_queue_get(sch, cl); mqprio_dump_class() 309 TC_H_MAKE(TC_H_MAJ(sch->handle), mqprio_dump_class() 320 static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, 325 struct net_device *dev = qdisc_dev(sch); 362 struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl); 364 sch = dev_queue->qdisc_sleeping; 365 if (gnet_stats_copy_basic(d, NULL, &sch->bstats) < 0 || 367 &sch->qstats, sch->q.qlen) < 0) 373 static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg) mqprio_walk() argument 375 struct net_device *dev = qdisc_dev(sch); mqprio_walk() 386 if (arg->fn(sch, ntx + 1, arg) < 0) { mqprio_walk()
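mqprio queues nothing itself; it installs a traffic-class layout on the device: a 16-entry prio_tc_map sends each skb priority to a traffic class, and every class owns a contiguous [offset, offset + count) range of hardware tx queues, each carrying a default child qdisc. The mapping, modeled with an illustrative struct shape:

    #include <stdint.h>

    struct tc_layout {
            uint8_t  num_tc;
            uint8_t  prio_tc_map[16];
            uint16_t offset[16], count[16];
    };

    /* first tx queue serving a given skb priority; spreading across
     * the class's queue range is then left to XPS / flow hashing */
    uint16_t mqprio_first_txq(const struct tc_layout *l, uint32_t skb_prio)
    {
            uint8_t tc = l->prio_tc_map[skb_prio & 15];
            return l->offset[tc];
    }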
|
H A D | sch_atm.c | 70 static inline struct atm_flow_data *lookup_flow(struct Qdisc *sch, u32 classid) lookup_flow() argument 72 struct atm_qdisc_data *p = qdisc_priv(sch); lookup_flow() 82 static int atm_tc_graft(struct Qdisc *sch, unsigned long arg, atm_tc_graft() argument 85 struct atm_qdisc_data *p = qdisc_priv(sch); atm_tc_graft() 88 pr_debug("atm_tc_graft(sch %p,[qdisc %p],flow %p,new %p,old %p)\n", atm_tc_graft() 89 sch, p, flow, new, old); atm_tc_graft() 101 static struct Qdisc *atm_tc_leaf(struct Qdisc *sch, unsigned long cl) atm_tc_leaf() argument 105 pr_debug("atm_tc_leaf(sch %p,flow %p)\n", sch, flow); atm_tc_leaf() 109 static unsigned long atm_tc_get(struct Qdisc *sch, u32 classid) atm_tc_get() argument 111 struct atm_qdisc_data *p __maybe_unused = qdisc_priv(sch); atm_tc_get() 114 pr_debug("atm_tc_get(sch %p,[qdisc %p],classid %x)\n", sch, p, classid); atm_tc_get() 115 flow = lookup_flow(sch, classid); atm_tc_get() 122 static unsigned long atm_tc_bind_filter(struct Qdisc *sch, atm_tc_bind_filter() argument 125 return atm_tc_get(sch, classid); atm_tc_bind_filter() 133 static void atm_tc_put(struct Qdisc *sch, unsigned long cl) atm_tc_put() argument 135 struct atm_qdisc_data *p = qdisc_priv(sch); atm_tc_put() 138 pr_debug("atm_tc_put(sch %p,[qdisc %p],flow %p)\n", sch, p, flow); atm_tc_put() 153 atm_tc_put(sch, (unsigned long)flow->excess); atm_tc_put() 185 static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent, atm_tc_change() argument 188 struct atm_qdisc_data *p = qdisc_priv(sch); atm_tc_change() 197 pr_debug("atm_tc_change(sch %p,[qdisc %p],classid %x,parent %x," atm_tc_change() 198 "flow %p,opt %p)\n", sch, p, classid, parent, flow, opt); atm_tc_change() 202 if (parent && parent != TC_H_ROOT && parent != sch->handle) atm_tc_change() 235 atm_tc_get(sch, nla_get_u32(tb[TCA_ATM_EXCESS])); atm_tc_change() 252 if (TC_H_MAJ(classid ^ sch->handle)) { atm_tc_change() 262 classid = TC_H_MAKE(sch->handle, 0x8000 | i); atm_tc_change() 263 cl = atm_tc_get(sch, classid); atm_tc_change() 266 atm_tc_put(sch, cl); atm_tc_change() 277 flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid); atm_tc_change() 301 atm_tc_put(sch, (unsigned long)excess); atm_tc_change() 306 static int atm_tc_delete(struct Qdisc *sch, unsigned long arg) atm_tc_delete() argument 308 struct atm_qdisc_data *p = qdisc_priv(sch); atm_tc_delete() 311 pr_debug("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n", sch, p, flow); atm_tc_delete() 326 atm_tc_put(sch, arg); atm_tc_delete() 330 static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker) atm_tc_walk() argument 332 struct atm_qdisc_data *p = qdisc_priv(sch); atm_tc_walk() 335 pr_debug("atm_tc_walk(sch %p,[qdisc %p],walker %p)\n", sch, p, walker); atm_tc_walk() 340 walker->fn(sch, (unsigned long)flow, walker) < 0) { atm_tc_walk() 348 static struct tcf_proto __rcu **atm_tc_find_tcf(struct Qdisc *sch, atm_tc_find_tcf() argument 351 struct atm_qdisc_data *p = qdisc_priv(sch); atm_tc_find_tcf() 354 pr_debug("atm_tc_find_tcf(sch %p,[qdisc %p],flow %p)\n", sch, p, flow); atm_tc_find_tcf() 360 static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch) atm_tc_enqueue() argument 362 struct atm_qdisc_data *p = qdisc_priv(sch); atm_tc_enqueue() 368 pr_debug("atm_tc_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p); atm_tc_enqueue() 371 if (TC_H_MAJ(skb->priority) != sch->handle || atm_tc_enqueue() 372 !(flow = (struct atm_flow_data *)atm_tc_get(sch, skb->priority))) { atm_tc_enqueue() 383 flow = lookup_flow(sch, res.classid); 
atm_tc_enqueue() 420 qdisc_qstats_drop(sch); atm_tc_enqueue() 430 * success at this place. Also, sch->q.qdisc needs to reflect whether atm_tc_enqueue() 436 sch->q.qlen++; atm_tc_enqueue() 452 struct Qdisc *sch = (struct Qdisc *)data; sch_atm_dequeue() local 453 struct atm_qdisc_data *p = qdisc_priv(sch); sch_atm_dequeue() 457 pr_debug("sch_atm_dequeue(sch %p,[qdisc %p])\n", sch, p); sch_atm_dequeue() 473 qdisc_bstats_update(sch, skb); sch_atm_dequeue() 500 static struct sk_buff *atm_tc_dequeue(struct Qdisc *sch) atm_tc_dequeue() argument 502 struct atm_qdisc_data *p = qdisc_priv(sch); atm_tc_dequeue() 505 pr_debug("atm_tc_dequeue(sch %p,[qdisc %p])\n", sch, p); atm_tc_dequeue() 509 sch->q.qlen--; atm_tc_dequeue() 513 static struct sk_buff *atm_tc_peek(struct Qdisc *sch) atm_tc_peek() argument 515 struct atm_qdisc_data *p = qdisc_priv(sch); atm_tc_peek() 517 pr_debug("atm_tc_peek(sch %p,[qdisc %p])\n", sch, p); atm_tc_peek() 522 static unsigned int atm_tc_drop(struct Qdisc *sch) atm_tc_drop() argument 524 struct atm_qdisc_data *p = qdisc_priv(sch); atm_tc_drop() 528 pr_debug("atm_tc_drop(sch %p,[qdisc %p])\n", sch, p); atm_tc_drop() 536 static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt) atm_tc_init() argument 538 struct atm_qdisc_data *p = qdisc_priv(sch); atm_tc_init() 540 pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt); atm_tc_init() 544 p->link.q = qdisc_create_dflt(sch->dev_queue, atm_tc_init() 545 &pfifo_qdisc_ops, sch->handle); atm_tc_init() 552 p->link.classid = sch->handle; atm_tc_init() 554 tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch); atm_tc_init() 558 static void atm_tc_reset(struct Qdisc *sch) atm_tc_reset() argument 560 struct atm_qdisc_data *p = qdisc_priv(sch); atm_tc_reset() 563 pr_debug("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p); atm_tc_reset() 566 sch->q.qlen = 0; atm_tc_reset() 569 static void atm_tc_destroy(struct Qdisc *sch) atm_tc_destroy() argument 571 struct atm_qdisc_data *p = qdisc_priv(sch); atm_tc_destroy() 574 pr_debug("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p); atm_tc_destroy() 581 atm_tc_put(sch, (unsigned long)flow); atm_tc_destroy() 586 static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl, atm_tc_dump_class() argument 589 struct atm_qdisc_data *p = qdisc_priv(sch); atm_tc_dump_class() 593 pr_debug("atm_tc_dump_class(sch %p,[qdisc %p],flow %p,skb %p,tcm %p)\n", atm_tc_dump_class() 594 sch, p, flow, skb, tcm); atm_tc_dump_class() 635 atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg, atm_tc_dump_class_stats() argument 647 static int atm_tc_dump(struct Qdisc *sch, struct sk_buff *skb) atm_tc_dump() argument
|
H A D | sch_gred.c | 92 static inline int gred_wred_mode_check(struct Qdisc *sch) gred_wred_mode_check() argument 94 struct gred_sched *table = qdisc_priv(sch); gred_wred_mode_check() 115 struct Qdisc *sch) gred_backlog() 118 return sch->qstats.backlog; gred_backlog() 152 static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch) gred_enqueue() argument 155 struct gred_sched *t = qdisc_priv(sch); gred_enqueue() 168 if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= gred_enqueue() 169 sch->limit)) gred_enqueue() 170 return qdisc_enqueue_tail(skb, sch); gred_enqueue() 200 gred_backlog(t, q, sch)); gred_enqueue() 213 qdisc_qstats_overlimit(sch); gred_enqueue() 223 qdisc_qstats_overlimit(sch); gred_enqueue() 233 if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) { gred_enqueue() 235 return qdisc_enqueue_tail(skb, sch); gred_enqueue() 240 return qdisc_drop(skb, sch); gred_enqueue() 243 qdisc_drop(skb, sch); gred_enqueue() 247 static struct sk_buff *gred_dequeue(struct Qdisc *sch) gred_dequeue() argument 250 struct gred_sched *t = qdisc_priv(sch); gred_dequeue() 252 skb = qdisc_dequeue_head(sch); gred_dequeue() 265 if (!sch->qstats.backlog) gred_dequeue() 279 static unsigned int gred_drop(struct Qdisc *sch) gred_drop() argument 282 struct gred_sched *t = qdisc_priv(sch); gred_drop() 284 skb = qdisc_dequeue_tail(sch); gred_drop() 298 if (!sch->qstats.backlog) gred_drop() 306 qdisc_drop(skb, sch); gred_drop() 313 static void gred_reset(struct Qdisc *sch) gred_reset() argument 316 struct gred_sched *t = qdisc_priv(sch); gred_reset() 318 qdisc_reset_queue(sch); gred_reset() 336 static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps) gred_change_table_def() argument 338 struct gred_sched *table = qdisc_priv(sch); gred_change_table_def() 350 sch_tree_lock(sch); gred_change_table_def() 360 sch_tree_unlock(sch); gred_change_table_def() 365 if (gred_wred_mode_check(sch)) gred_change_table_def() 384 static inline int gred_change_vq(struct Qdisc *sch, int dp, gred_change_vq() argument 389 struct gred_sched *table = qdisc_priv(sch); gred_change_vq() 401 if (ctl->limit > sch->limit) gred_change_vq() 402 q->limit = sch->limit; gred_change_vq() 424 static int gred_change(struct Qdisc *sch, struct nlattr *opt) gred_change() argument 426 struct gred_sched *table = qdisc_priv(sch); gred_change() 443 sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]); gred_change() 444 return gred_change_table_def(sch, opt); gred_change() 477 sch_tree_lock(sch); gred_change() 479 err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc); gred_change() 485 if (gred_wred_mode_check(sch)) gred_change() 492 sch_tree_unlock(sch); gred_change() 498 static int gred_init(struct Qdisc *sch, struct nlattr *opt) gred_init() argument 514 sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]); gred_init() 516 sch->limit = qdisc_dev(sch)->tx_queue_len gred_init() 517 * psched_mtu(qdisc_dev(sch)); gred_init() 519 return gred_change_table_def(sch, tb[TCA_GRED_DPS]); gred_init() 522 static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) gred_dump() argument 524 struct gred_sched *table = qdisc_priv(sch); gred_dump() 549 if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit)) gred_dump() 574 opt.backlog = gred_backlog(table, q, sch); gred_dump() 609 static void gred_destroy(struct Qdisc *sch) gred_destroy() argument 611 struct gred_sched *table = qdisc_priv(sch); gred_destroy() 113 gred_backlog(struct gred_sched *table, struct gred_sched_data *q, struct Qdisc *sch) gred_backlog() argument
|
H A D | sch_choke.c | 118 static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx) choke_drop_by_idx() argument 120 struct choke_sched_data *q = qdisc_priv(sch); choke_drop_by_idx() 130 qdisc_qstats_backlog_dec(sch, skb); choke_drop_by_idx() 131 qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb)); choke_drop_by_idx() 132 qdisc_drop(skb, sch); choke_drop_by_idx() 133 --sch->q.qlen; choke_drop_by_idx() 195 struct Qdisc *sch, int *qerr) choke_classify() 198 struct choke_sched_data *q = qdisc_priv(sch); choke_classify() 264 static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch) choke_enqueue() argument 267 struct choke_sched_data *q = qdisc_priv(sch); choke_enqueue() 272 if (!choke_classify(skb, sch, &ret)) choke_enqueue() 278 q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen); choke_enqueue() 291 choke_drop_by_idx(sch, idx); choke_enqueue() 299 qdisc_qstats_overlimit(sch); choke_enqueue() 312 qdisc_qstats_overlimit(sch); choke_enqueue() 325 if (sch->q.qlen < q->limit) { choke_enqueue() 328 ++sch->q.qlen; choke_enqueue() 329 qdisc_qstats_backlog_inc(sch, skb); choke_enqueue() 334 return qdisc_drop(skb, sch); choke_enqueue() 337 qdisc_drop(skb, sch); choke_enqueue() 342 qdisc_qstats_drop(sch); choke_enqueue() 347 static struct sk_buff *choke_dequeue(struct Qdisc *sch) choke_dequeue() argument 349 struct choke_sched_data *q = qdisc_priv(sch); choke_dequeue() 361 --sch->q.qlen; choke_dequeue() 362 qdisc_qstats_backlog_dec(sch, skb); choke_dequeue() 363 qdisc_bstats_update(sch, skb); choke_dequeue() 368 static unsigned int choke_drop(struct Qdisc *sch) choke_drop() argument 370 struct choke_sched_data *q = qdisc_priv(sch); choke_drop() 373 len = qdisc_queue_drop(sch); choke_drop() 384 static void choke_reset(struct Qdisc *sch) choke_reset() argument 386 struct choke_sched_data *q = qdisc_priv(sch); choke_reset() 394 qdisc_qstats_backlog_dec(sch, skb); choke_reset() 395 --sch->q.qlen; choke_reset() 396 qdisc_drop(skb, sch); choke_reset() 416 static int choke_change(struct Qdisc *sch, struct nlattr *opt) choke_change() argument 418 struct choke_sched_data *q = qdisc_priv(sch); choke_change() 455 sch_tree_lock(sch); choke_change() 458 unsigned int oqlen = sch->q.qlen, tail = 0; choke_change() 472 qdisc_qstats_backlog_dec(sch, skb); choke_change() 473 --sch->q.qlen; choke_change() 474 qdisc_drop(skb, sch); choke_change() 476 qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped); choke_change() 484 sch_tree_lock(sch); choke_change() 498 sch_tree_unlock(sch); choke_change() 503 static int choke_init(struct Qdisc *sch, struct nlattr *opt) choke_init() argument 505 return choke_change(sch, opt); choke_init() 508 static int choke_dump(struct Qdisc *sch, struct sk_buff *skb) choke_dump() argument 510 struct choke_sched_data *q = qdisc_priv(sch); choke_dump() 536 static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d) choke_dump_stats() argument 538 struct choke_sched_data *q = qdisc_priv(sch); choke_dump_stats() 550 static void choke_destroy(struct Qdisc *sch) choke_destroy() argument 552 struct choke_sched_data *q = qdisc_priv(sch); choke_destroy() 558 static struct sk_buff *choke_peek_head(struct Qdisc *sch) choke_peek_head() argument 560 struct choke_sched_data *q = qdisc_priv(sch); choke_peek_head() 194 choke_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) choke_classify() argument
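CHOKe is RED plus one extra test on enqueue: the arriving packet is compared with a packet drawn at random from the queue, and if both belong to the same flow both are dropped, which statistically punishes exactly the flows occupying the most buffer. The core comparison, with flow ids standing in for the real header match (choke_match_flow() in the kernel):

    #include <stdbool.h>
    #include <stdlib.h>

    /* returns true if the random victim matches the arriving flow,
     * in which case both packets are dropped */
    bool choke_peek_random(const unsigned *flow_of, unsigned qlen,
                           unsigned incoming_flow, unsigned *victim)
    {
            if (qlen == 0)
                    return false;
            *victim = (unsigned)rand() % qlen;
            return flow_of[*victim] == incoming_flow;
    }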
|
H A D | sch_pie.c | 98 static bool drop_early(struct Qdisc *sch, u32 packet_size) drop_early() argument 100 struct pie_sched_data *q = qdisc_priv(sch); drop_early() 103 u32 mtu = psched_mtu(qdisc_dev(sch)); drop_early() 119 if (sch->qstats.backlog < 2 * mtu) drop_early() 137 static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch) pie_qdisc_enqueue() argument 139 struct pie_sched_data *q = qdisc_priv(sch); pie_qdisc_enqueue() 142 if (unlikely(qdisc_qlen(sch) >= sch->limit)) { pie_qdisc_enqueue() 147 if (!drop_early(sch, skb->len)) { pie_qdisc_enqueue() 161 if (qdisc_qlen(sch) > q->stats.maxq) pie_qdisc_enqueue() 162 q->stats.maxq = qdisc_qlen(sch); pie_qdisc_enqueue() 164 return qdisc_enqueue_tail(skb, sch); pie_qdisc_enqueue() 169 return qdisc_drop(skb, sch); pie_qdisc_enqueue() 182 static int pie_change(struct Qdisc *sch, struct nlattr *opt) pie_change() argument 184 struct pie_sched_data *q = qdisc_priv(sch); pie_change() 196 sch_tree_lock(sch); pie_change() 215 sch->limit = limit; pie_change() 231 qlen = sch->q.qlen; pie_change() 232 while (sch->q.qlen > sch->limit) { pie_change() 233 struct sk_buff *skb = __skb_dequeue(&sch->q); pie_change() 236 qdisc_qstats_backlog_dec(sch, skb); pie_change() 237 qdisc_drop(skb, sch); pie_change() 239 qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped); pie_change() 241 sch_tree_unlock(sch); pie_change() 245 static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb) pie_process_dequeue() argument 248 struct pie_sched_data *q = qdisc_priv(sch); pie_process_dequeue() 249 int qlen = sch->qstats.backlog; /* current queue size in bytes */ pie_process_dequeue() 311 static void calculate_probability(struct Qdisc *sch) calculate_probability() argument 313 struct pie_sched_data *q = qdisc_priv(sch); calculate_probability() 314 u32 qlen = sch->qstats.backlog; /* queue size in bytes */ calculate_probability() 426 struct Qdisc *sch = (struct Qdisc *)arg; pie_timer() local 427 struct pie_sched_data *q = qdisc_priv(sch); pie_timer() 428 spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch)); pie_timer() 431 calculate_probability(sch); pie_timer() 440 static int pie_init(struct Qdisc *sch, struct nlattr *opt) pie_init() argument 442 struct pie_sched_data *q = qdisc_priv(sch); pie_init() 446 sch->limit = q->params.limit; pie_init() 448 setup_timer(&q->adapt_timer, pie_timer, (unsigned long)sch); pie_init() 451 int err = pie_change(sch, opt); pie_init() 461 static int pie_dump(struct Qdisc *sch, struct sk_buff *skb) pie_dump() argument 463 struct pie_sched_data *q = qdisc_priv(sch); pie_dump() 474 nla_put_u32(skb, TCA_PIE_LIMIT, sch->limit) || pie_dump() 490 static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d) pie_dump_stats() argument 492 struct pie_sched_data *q = qdisc_priv(sch); pie_dump_stats() 510 static struct sk_buff *pie_qdisc_dequeue(struct Qdisc *sch) pie_qdisc_dequeue() argument 513 skb = __qdisc_dequeue_head(sch, &sch->q); pie_qdisc_dequeue() 518 pie_process_dequeue(sch, skb); pie_qdisc_dequeue() 522 static void pie_reset(struct Qdisc *sch) pie_reset() argument 524 struct pie_sched_data *q = qdisc_priv(sch); pie_reset() 525 qdisc_reset_queue(sch); pie_reset() 529 static void pie_destroy(struct Qdisc *sch) pie_destroy() argument 531 struct pie_sched_data *q = qdisc_priv(sch); pie_destroy()
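PIE derives its drop probability from latency rather than backlog: the timer above (pie_timer -> calculate_probability) periodically estimates queue delay and nudges the probability by alpha times the deviation from the target delay plus beta times the recent trend. A simplified update that omits the kernel's auto-scaling of alpha/beta at small probabilities:

    struct pie { double prob, alpha, beta, target, qdelay_old; };

    void pie_update_probability(struct pie *q, double qdelay)
    {
            double delta = q->alpha * (qdelay - q->target)
                         + q->beta  * (qdelay - q->qdelay_old);

            q->prob += delta;
            if (q->prob < 0.0) q->prob = 0.0;
            if (q->prob > 1.0) q->prob = 1.0;
            q->qdelay_old = qdelay;
    }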
|
H A D | sch_tbf.c | 158 static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch) tbf_segment() argument 160 struct tbf_sched_data *q = qdisc_priv(sch); tbf_segment() 169 return qdisc_reshape_fail(skb, sch); tbf_segment() 180 qdisc_qstats_drop(sch); tbf_segment() 186 sch->q.qlen += nb; tbf_segment() 188 qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len); tbf_segment() 193 static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch) tbf_enqueue() argument 195 struct tbf_sched_data *q = qdisc_priv(sch); tbf_enqueue() 200 return tbf_segment(skb, sch); tbf_enqueue() 201 return qdisc_reshape_fail(skb, sch); tbf_enqueue() 206 qdisc_qstats_drop(sch); tbf_enqueue() 210 sch->q.qlen++; tbf_enqueue() 214 static unsigned int tbf_drop(struct Qdisc *sch) tbf_drop() argument 216 struct tbf_sched_data *q = qdisc_priv(sch); tbf_drop() 220 sch->q.qlen--; tbf_drop() 221 qdisc_qstats_drop(sch); tbf_drop() 231 static struct sk_buff *tbf_dequeue(struct Qdisc *sch) tbf_dequeue() argument 233 struct tbf_sched_data *q = qdisc_priv(sch); tbf_dequeue() 266 sch->q.qlen--; tbf_dequeue() 267 qdisc_unthrottled(sch); tbf_dequeue() 268 qdisc_bstats_update(sch, skb); tbf_dequeue() 287 qdisc_qstats_overlimit(sch); tbf_dequeue() 292 static void tbf_reset(struct Qdisc *sch) tbf_reset() argument 294 struct tbf_sched_data *q = qdisc_priv(sch); tbf_reset() 297 sch->q.qlen = 0; tbf_reset() 314 static int tbf_change(struct Qdisc *sch, struct nlattr *opt) tbf_change() argument 317 struct tbf_sched_data *q = qdisc_priv(sch); tbf_change() 380 if (max_size < psched_mtu(qdisc_dev(sch))) tbf_change() 382 max_size, qdisc_dev(sch)->name, tbf_change() 383 psched_mtu(qdisc_dev(sch))); tbf_change() 395 child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit); tbf_change() 402 sch_tree_lock(sch); tbf_change() 425 sch_tree_unlock(sch); tbf_change() 431 static int tbf_init(struct Qdisc *sch, struct nlattr *opt) tbf_init() argument 433 struct tbf_sched_data *q = qdisc_priv(sch); tbf_init() 439 qdisc_watchdog_init(&q->watchdog, sch); tbf_init() 442 return tbf_change(sch, opt); tbf_init() 445 static void tbf_destroy(struct Qdisc *sch) tbf_destroy() argument 447 struct tbf_sched_data *q = qdisc_priv(sch); tbf_destroy() 453 static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb) tbf_dump() argument 455 struct tbf_sched_data *q = qdisc_priv(sch); tbf_dump() 459 sch->qstats.backlog = q->qdisc->qstats.backlog; tbf_dump() 489 static int tbf_dump_class(struct Qdisc *sch, unsigned long cl, tbf_dump_class() argument 492 struct tbf_sched_data *q = qdisc_priv(sch); tbf_dump_class() 500 static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, tbf_graft() argument 503 struct tbf_sched_data *q = qdisc_priv(sch); tbf_graft() 508 *old = qdisc_replace(sch, new, &q->qdisc); tbf_graft() 512 static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg) tbf_leaf() argument 514 struct tbf_sched_data *q = qdisc_priv(sch); tbf_leaf() 518 static unsigned long tbf_get(struct Qdisc *sch, u32 classid) tbf_get() argument 523 static void tbf_put(struct Qdisc *sch, unsigned long arg) tbf_put() argument 527 static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker) tbf_walk() argument 531 if (walker->fn(sch, 1, walker) < 0) { tbf_walk()
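TBF accounts its tokens in units of time, which keeps the dequeue path above cheap: the bucket holds up to `buffer` nanoseconds of credit, credit accrues one-for-one with wall-clock time, and a packet costs the time the configured rate needs to serialize its bytes. A self-contained model that ignores the second (peak-rate) bucket:

    #include <stdbool.h>
    #include <stdint.h>

    struct tbf {
            uint64_t buffer_ns;   /* bucket depth, ns of credit */
            int64_t  tokens_ns;   /* current credit */
            uint64_t rate_Bps;    /* bytes per second */
            uint64_t t_last_ns;   /* time of last update */
    };

    /* credit needed to send len bytes at the configured rate */
    static uint64_t l2t_ns(const struct tbf *q, uint32_t len)
    {
            return len * 1000000000ull / q->rate_Bps;
    }

    bool tbf_may_send(struct tbf *q, uint64_t now_ns, uint32_t len)
    {
            int64_t toks = q->tokens_ns + (int64_t)(now_ns - q->t_last_ns);

            if (toks > (int64_t)q->buffer_ns)
                    toks = (int64_t)q->buffer_ns;
            toks -= (int64_t)l2t_ns(q, len);
            if (toks < 0)
                    return false;   /* throttle until credit accrues */
            q->tokens_ns = toks;
            q->t_last_ns = now_ns;
            return true;
    }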
|
H A D | sch_sfb.c | 278 static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch) sfb_enqueue() argument 281 struct sfb_sched_data *q = qdisc_priv(sch); sfb_enqueue() 291 if (unlikely(sch->q.qlen >= q->limit)) { sfb_enqueue() 292 qdisc_qstats_overlimit(sch); sfb_enqueue() 345 qdisc_qstats_overlimit(sch); sfb_enqueue() 371 qdisc_qstats_overlimit(sch); sfb_enqueue() 402 sch->q.qlen++; sfb_enqueue() 406 qdisc_qstats_drop(sch); sfb_enqueue() 411 qdisc_drop(skb, sch); sfb_enqueue() 415 qdisc_qstats_drop(sch); sfb_enqueue() 420 static struct sk_buff *sfb_dequeue(struct Qdisc *sch) sfb_dequeue() argument 422 struct sfb_sched_data *q = qdisc_priv(sch); sfb_dequeue() 429 qdisc_bstats_update(sch, skb); sfb_dequeue() 430 sch->q.qlen--; sfb_dequeue() 437 static struct sk_buff *sfb_peek(struct Qdisc *sch) sfb_peek() argument 439 struct sfb_sched_data *q = qdisc_priv(sch); sfb_peek() 447 static void sfb_reset(struct Qdisc *sch) sfb_reset() argument 449 struct sfb_sched_data *q = qdisc_priv(sch); sfb_reset() 452 sch->q.qlen = 0; sfb_reset() 459 static void sfb_destroy(struct Qdisc *sch) sfb_destroy() argument 461 struct sfb_sched_data *q = qdisc_priv(sch); sfb_destroy() 483 static int sfb_change(struct Qdisc *sch, struct nlattr *opt) sfb_change() argument 485 struct sfb_sched_data *q = qdisc_priv(sch); sfb_change() 505 limit = qdisc_dev(sch)->tx_queue_len; sfb_change() 507 child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit); sfb_change() 511 sch_tree_lock(sch); sfb_change() 537 sch_tree_unlock(sch); sfb_change() 542 static int sfb_init(struct Qdisc *sch, struct nlattr *opt) sfb_init() argument 544 struct sfb_sched_data *q = qdisc_priv(sch); sfb_init() 547 return sfb_change(sch, opt); sfb_init() 550 static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb) sfb_dump() argument 552 struct sfb_sched_data *q = qdisc_priv(sch); sfb_dump() 566 sch->qstats.backlog = q->qdisc->qstats.backlog; sfb_dump() 579 static int sfb_dump_stats(struct Qdisc *sch, struct gnet_dump *d) sfb_dump_stats() argument 581 struct sfb_sched_data *q = qdisc_priv(sch); sfb_dump_stats() 596 static int sfb_dump_class(struct Qdisc *sch, unsigned long cl, sfb_dump_class() argument 602 static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, sfb_graft() argument 605 struct sfb_sched_data *q = qdisc_priv(sch); sfb_graft() 610 *old = qdisc_replace(sch, new, &q->qdisc); sfb_graft() 614 static struct Qdisc *sfb_leaf(struct Qdisc *sch, unsigned long arg) sfb_leaf() argument 616 struct sfb_sched_data *q = qdisc_priv(sch); sfb_leaf() 621 static unsigned long sfb_get(struct Qdisc *sch, u32 classid) sfb_get() argument 626 static void sfb_put(struct Qdisc *sch, unsigned long arg) sfb_put() argument 630 static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid, sfb_change_class() argument 636 static int sfb_delete(struct Qdisc *sch, unsigned long cl) sfb_delete() argument 641 static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker) sfb_walk() argument 645 if (walker->fn(sch, 1, walker) < 0) { sfb_walk() 653 static struct tcf_proto __rcu **sfb_find_tcf(struct Qdisc *sch, sfb_find_tcf() argument 656 struct sfb_sched_data *q = qdisc_priv(sch); sfb_find_tcf() 663 static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent, sfb_bind() argument
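sfb_enqueue() above applies the Blue algorithm per (level, hash-bucket) bin: the marking probability rises when a bin overflows and decays when the link goes idle. A sketch with illustrative step constants (in sch_sfb the increment and decrement are configurable):

#include <stddef.h>

/* Blue marking-probability update, applied per (level, bucket) bin:
 * rise on overflow, decay on idle. Step sizes are illustrative. */
struct blue_bin {
    double p_mark;    /* current marking/drop probability, 0..1 */
};

#define BLUE_INC 0.0025
#define BLUE_DEC 0.00025

void blue_on_overflow(struct blue_bin *b)
{
    b->p_mark += BLUE_INC;
    if (b->p_mark > 1.0)
        b->p_mark = 1.0;
}

void blue_on_idle(struct blue_bin *b)
{
    b->p_mark -= BLUE_DEC;
    if (b->p_mark < 0.0)
        b->p_mark = 0.0;
}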
|
H A D | sch_sfq.c | 164 static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch, sfq_classify() argument 167 struct sfq_sched_data *q = qdisc_priv(sch); sfq_classify() 172 if (TC_H_MAJ(skb->priority) == sch->handle && sfq_classify() 292 static unsigned int sfq_drop(struct Qdisc *sch) sfq_drop() argument 294 struct sfq_sched_data *q = qdisc_priv(sch); sfq_drop() 309 sch->q.qlen--; sfq_drop() 310 qdisc_qstats_drop(sch); sfq_drop() 311 qdisc_qstats_backlog_dec(sch, skb); sfq_drop() 346 sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) sfq_enqueue() argument 348 struct sfq_sched_data *q = qdisc_priv(sch); sfq_enqueue() 356 hash = sfq_classify(skb, sch, &ret); sfq_enqueue() 359 qdisc_qstats_drop(sch); sfq_enqueue() 370 return qdisc_drop(skb, sch); sfq_enqueue() 389 qdisc_qstats_overlimit(sch); sfq_enqueue() 406 qdisc_qstats_overlimit(sch); sfq_enqueue() 427 return qdisc_drop(skb, sch); sfq_enqueue() 432 sch->qstats.backlog -= delta; sfq_enqueue() 434 qdisc_drop(head, sch); sfq_enqueue() 441 qdisc_qstats_backlog_inc(sch, skb); sfq_enqueue() 460 if (++sch->q.qlen <= q->limit) sfq_enqueue() 464 dropped = sfq_drop(sch); sfq_enqueue() 472 qdisc_tree_reduce_backlog(sch, 1, dropped); sfq_enqueue() 477 sfq_dequeue(struct Qdisc *sch) sfq_dequeue() argument 479 struct sfq_sched_data *q = qdisc_priv(sch); sfq_dequeue() 498 qdisc_bstats_update(sch, skb); sfq_dequeue() 499 sch->q.qlen--; sfq_dequeue() 500 qdisc_qstats_backlog_dec(sch, skb); sfq_dequeue() 518 sfq_reset(struct Qdisc *sch) sfq_reset() argument 522 while ((skb = sfq_dequeue(sch)) != NULL) sfq_reset() 532 static void sfq_rehash(struct Qdisc *sch) sfq_rehash() argument 534 struct sfq_sched_data *q = qdisc_priv(sch); sfq_rehash() 568 qdisc_qstats_backlog_dec(sch, skb); sfq_rehash() 598 sch->q.qlen -= dropped; sfq_rehash() 599 qdisc_tree_reduce_backlog(sch, dropped, drop_len); sfq_rehash() 604 struct Qdisc *sch = (struct Qdisc *)arg; sfq_perturbation() local 605 struct sfq_sched_data *q = qdisc_priv(sch); sfq_perturbation() 606 spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch)); sfq_perturbation() 611 sfq_rehash(sch); sfq_perturbation() 618 static int sfq_change(struct Qdisc *sch, struct nlattr *opt) sfq_change() argument 620 struct sfq_sched_data *q = qdisc_priv(sch); sfq_change() 638 sch_tree_lock(sch); sfq_change() 670 qlen = sch->q.qlen; sfq_change() 671 while (sch->q.qlen > q->limit) sfq_change() 672 dropped += sfq_drop(sch); sfq_change() 673 qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped); sfq_change() 680 sch_tree_unlock(sch); sfq_change() 699 static void sfq_destroy(struct Qdisc *sch) sfq_destroy() argument 701 struct sfq_sched_data *q = qdisc_priv(sch); sfq_destroy() 711 static int sfq_init(struct Qdisc *sch, struct nlattr *opt) sfq_init() argument 713 struct sfq_sched_data *q = qdisc_priv(sch); sfq_init() 717 q->perturb_timer.data = (unsigned long)sch; sfq_init() 731 q->quantum = psched_mtu(qdisc_dev(sch)); sfq_init() 737 int err = sfq_change(sch, opt); sfq_init() 745 sfq_destroy(sch); sfq_init() 756 sch->flags |= TCQ_F_CAN_BYPASS; sfq_init() 758 sch->flags &= ~TCQ_F_CAN_BYPASS; sfq_init() 762 static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb) sfq_dump() argument 764 struct sfq_sched_data *q = qdisc_priv(sch); sfq_dump() 799 static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg) sfq_leaf() argument 804 static unsigned long sfq_get(struct Qdisc *sch, u32 classid) sfq_get() argument 809 static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent, sfq_bind() argument 813 
sch->flags &= ~TCQ_F_CAN_BYPASS; sfq_bind() 821 static struct tcf_proto __rcu **sfq_find_tcf(struct Qdisc *sch, sfq_find_tcf() argument 824 struct sfq_sched_data *q = qdisc_priv(sch); sfq_find_tcf() 831 static int sfq_dump_class(struct Qdisc *sch, unsigned long cl, sfq_dump_class() argument 838 static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl, sfq_dump_class_stats() argument 841 struct sfq_sched_data *q = qdisc_priv(sch); sfq_dump_class_stats() 858 static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg) sfq_walk() argument 860 struct sfq_sched_data *q = qdisc_priv(sch); sfq_walk() 872 if (arg->fn(sch, i + 1, arg) < 0) { sfq_walk()
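sfq_classify() and sfq_rehash() above hash each flow into one of a fixed set of slots and periodically re-roll a perturbation value so that a hash collision between two flows is only temporary. A stand-in for that slot selection (the kernel uses the flow dissector plus jhash; the multiplicative mix below is only illustrative):

#include <stdint.h>

/* Flow-to-slot hashing in the spirit of sfq_classify(): mix the
 * flow key with a perturbation that sfq_perturbation() re-rolls. */
struct flow_key {
    uint32_t saddr, daddr;
    uint16_t sport, dport;
    uint8_t  proto;
};

uint32_t sfq_slot(const struct flow_key *k, uint32_t perturb,
                  uint32_t divisor /* slot count, power of two */)
{
    uint32_t h = k->saddr * 2654435761u;

    h ^= k->daddr + perturb;
    h = h * 2654435761u + (((uint32_t)k->sport << 16) | k->dport);
    h ^= k->proto;
    return ((h * 2654435761u) >> 16) & (divisor - 1);
}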
|
H A D | sch_plug.c | 89 static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch) plug_enqueue() argument 91 struct plug_sched_data *q = qdisc_priv(sch); plug_enqueue() 93 if (likely(sch->qstats.backlog + skb->len <= q->limit)) { plug_enqueue() 96 return qdisc_enqueue_tail(skb, sch); plug_enqueue() 99 return qdisc_reshape_fail(skb, sch); plug_enqueue() 102 static struct sk_buff *plug_dequeue(struct Qdisc *sch) plug_dequeue() argument 104 struct plug_sched_data *q = qdisc_priv(sch); plug_dequeue() 106 if (qdisc_is_throttled(sch)) plug_dequeue() 114 qdisc_throttled(sch); plug_dequeue() 120 return qdisc_dequeue_head(sch); plug_dequeue() 123 static int plug_init(struct Qdisc *sch, struct nlattr *opt) plug_init() argument 125 struct plug_sched_data *q = qdisc_priv(sch); plug_init() 133 q->limit = qdisc_dev(sch)->tx_queue_len plug_init() 134 * psched_mtu(qdisc_dev(sch)); plug_init() 144 qdisc_throttled(sch); plug_init() 158 static int plug_change(struct Qdisc *sch, struct nlattr *opt) plug_change() argument 160 struct plug_sched_data *q = qdisc_priv(sch); plug_change() 176 qdisc_throttled(sch); plug_change() 185 qdisc_unthrottled(sch); plug_change() 186 netif_schedule_queue(sch->dev_queue); plug_change() 193 qdisc_unthrottled(sch); plug_change() 194 netif_schedule_queue(sch->dev_queue); plug_change()
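plug_enqueue()/plug_dequeue() above buffer packets while the qdisc is plugged and release them on an unplug command, bounding the buffer in bytes rather than packets. A small model of the two checks involved (field names assumed):

#include <stdbool.h>
#include <stdint.h>

/* Plug-qdisc model: enqueue accepts packets while the byte backlog
 * stays under the limit (cf. plug_enqueue() above); dequeue yields
 * nothing while the qdisc is plugged. */
struct plug {
    bool     plugged;
    uint32_t backlog;   /* queued bytes */
    uint32_t limit;     /* byte cap */
};

bool plug_admit(struct plug *q, uint32_t len)
{
    if (q->backlog + len > q->limit)
        return false;              /* caller drops / reshape-fails */
    q->backlog += len;
    return true;
}

bool plug_can_dequeue(const struct plug *q)
{
    return !q->plugged && q->backlog > 0;
}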
|
H A D | sch_hhf.c | 129 u32 quantum; /* psched_mtu(qdisc_dev(sch)); */ 246 static enum wdrr_bucket_idx hhf_classify(struct sk_buff *skb, struct Qdisc *sch) hhf_classify() argument 248 struct hhf_sched_data *q = qdisc_priv(sch); hhf_classify() 348 static unsigned int hhf_drop(struct Qdisc *sch) hhf_drop() argument 350 struct hhf_sched_data *q = qdisc_priv(sch); hhf_drop() 361 sch->q.qlen--; hhf_drop() 362 qdisc_qstats_drop(sch); hhf_drop() 363 qdisc_qstats_backlog_dec(sch, skb); hhf_drop() 371 static unsigned int hhf_qdisc_drop(struct Qdisc *sch) hhf_qdisc_drop() argument 375 prev_backlog = sch->qstats.backlog; hhf_qdisc_drop() 376 hhf_drop(sch); hhf_qdisc_drop() 377 return prev_backlog - sch->qstats.backlog; hhf_qdisc_drop() 380 static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch) hhf_enqueue() argument 382 struct hhf_sched_data *q = qdisc_priv(sch); hhf_enqueue() 387 idx = hhf_classify(skb, sch); hhf_enqueue() 391 qdisc_qstats_backlog_inc(sch, skb); hhf_enqueue() 410 if (++sch->q.qlen <= sch->limit) hhf_enqueue() 413 prev_backlog = sch->qstats.backlog; hhf_enqueue() 418 if (hhf_drop(sch) == idx) hhf_enqueue() 422 qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog); hhf_enqueue() 426 static struct sk_buff *hhf_dequeue(struct Qdisc *sch) hhf_dequeue() argument 428 struct hhf_sched_data *q = qdisc_priv(sch); hhf_dequeue() 453 sch->q.qlen--; hhf_dequeue() 454 qdisc_qstats_backlog_dec(sch, skb); hhf_dequeue() 465 qdisc_bstats_update(sch, skb); hhf_dequeue() 471 static void hhf_reset(struct Qdisc *sch) hhf_reset() argument 475 while ((skb = hhf_dequeue(sch)) != NULL) hhf_reset() 494 static void hhf_destroy(struct Qdisc *sch) hhf_destroy() argument 497 struct hhf_sched_data *q = qdisc_priv(sch); hhf_destroy() 528 static int hhf_change(struct Qdisc *sch, struct nlattr *opt) hhf_change() argument 530 struct hhf_sched_data *q = qdisc_priv(sch); hhf_change() 555 sch_tree_lock(sch); hhf_change() 558 sch->limit = nla_get_u32(tb[TCA_HHF_BACKLOG_LIMIT]); hhf_change() 581 qlen = sch->q.qlen; hhf_change() 582 prev_backlog = sch->qstats.backlog; hhf_change() 583 while (sch->q.qlen > sch->limit) { hhf_change() 584 struct sk_buff *skb = hhf_dequeue(sch); hhf_change() 588 qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, hhf_change() 589 prev_backlog - sch->qstats.backlog); hhf_change() 591 sch_tree_unlock(sch); hhf_change() 595 static int hhf_init(struct Qdisc *sch, struct nlattr *opt) hhf_init() argument 597 struct hhf_sched_data *q = qdisc_priv(sch); hhf_init() 600 sch->limit = 1000; hhf_init() 601 q->quantum = psched_mtu(qdisc_dev(sch)); hhf_init() 613 int err = hhf_change(sch, opt); hhf_init() 639 hhf_destroy(sch); hhf_init() 650 hhf_destroy(sch); hhf_init() 666 static int hhf_dump(struct Qdisc *sch, struct sk_buff *skb) hhf_dump() argument 668 struct hhf_sched_data *q = qdisc_priv(sch); hhf_dump() 675 if (nla_put_u32(skb, TCA_HHF_BACKLOG_LIMIT, sch->limit) || hhf_dump() 692 static int hhf_dump_stats(struct Qdisc *sch, struct gnet_dump *d) hhf_dump_stats() argument 694 struct hhf_sched_data *q = qdisc_priv(sch); hhf_dump_stats()
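hhf_classify() above steers flows into one of two WDRR buckets by running a multistage byte-counting filter: a flow counts as a heavy hitter once every stage's counter for its hash exceeds the admit threshold. A rough sketch, omitting the kernel's counter aging and per-flow admission list:

#include <stdbool.h>
#include <stdint.h>

/* Multistage heavy-hitter filter, roughly what hhf_classify() does:
 * each stage keeps byte counters indexed by a slice of the flow
 * hash; a flow is declared heavy only when every stage agrees. */
#define HHF_STAGES 4
#define HHF_CELLS  1024          /* counters per stage, power of two */

static uint32_t hh_counters[HHF_STAGES][HHF_CELLS];

bool hhf_is_heavy(uint32_t flow_hash, uint32_t pkt_len, uint32_t threshold)
{
    bool heavy = true;

    for (int s = 0; s < HHF_STAGES; s++) {
        uint32_t idx = (flow_hash >> (s * 8)) & (HHF_CELLS - 1);

        hh_counters[s][idx] += pkt_len;
        if (hh_counters[s][idx] < threshold)
            heavy = false;
    }
    return heavy;   /* true => steer to the heavy-hitter WDRR bucket */
}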
|
H A D | sch_fq.c | 299 static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow) fq_dequeue_head() argument 307 qdisc_qstats_backlog_dec(sch, skb); fq_dequeue_head() 308 sch->q.qlen--; fq_dequeue_head() 371 static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch) fq_enqueue() argument 373 struct fq_sched_data *q = qdisc_priv(sch); fq_enqueue() 376 if (unlikely(sch->q.qlen >= sch->limit)) fq_enqueue() 377 return qdisc_drop(skb, sch); fq_enqueue() 382 return qdisc_drop(skb, sch); fq_enqueue() 388 qdisc_qstats_backlog_inc(sch, skb); fq_enqueue() 402 sch->q.qlen++; fq_enqueue() 428 static struct sk_buff *fq_dequeue(struct Qdisc *sch) fq_dequeue() argument 430 struct fq_sched_data *q = qdisc_priv(sch); fq_dequeue() 437 skb = fq_dequeue_head(sch, &q->internal); fq_dequeue() 470 skb = fq_dequeue_head(sch, f); fq_dequeue() 514 qdisc_bstats_update(sch, skb); fq_dequeue() 518 static void fq_reset(struct Qdisc *sch) fq_reset() argument 520 struct fq_sched_data *q = qdisc_priv(sch); fq_reset() 527 while ((skb = fq_dequeue_head(sch, &q->internal)) != NULL) fq_reset() 539 while ((skb = fq_dequeue_head(sch, f)) != NULL) fq_reset() 613 static int fq_resize(struct Qdisc *sch, u32 log) fq_resize() argument 615 struct fq_sched_data *q = qdisc_priv(sch); fq_resize() 625 netdev_queue_numa_node_read(sch->dev_queue)); fq_resize() 632 sch_tree_lock(sch); fq_resize() 641 sch_tree_unlock(sch); fq_resize() 660 static int fq_change(struct Qdisc *sch, struct nlattr *opt) fq_change() argument 662 struct fq_sched_data *q = qdisc_priv(sch); fq_change() 675 sch_tree_lock(sch); fq_change() 688 sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]); fq_change() 731 sch_tree_unlock(sch); fq_change() 732 err = fq_resize(sch, fq_log); fq_change() 733 sch_tree_lock(sch); fq_change() 735 while (sch->q.qlen > sch->limit) { fq_change() 736 struct sk_buff *skb = fq_dequeue(sch); fq_change() 744 qdisc_tree_reduce_backlog(sch, drop_count, drop_len); fq_change() 746 sch_tree_unlock(sch); fq_change() 750 static void fq_destroy(struct Qdisc *sch) fq_destroy() argument 752 struct fq_sched_data *q = qdisc_priv(sch); fq_destroy() 754 fq_reset(sch); fq_destroy() 759 static int fq_init(struct Qdisc *sch, struct nlattr *opt) fq_init() argument 761 struct fq_sched_data *q = qdisc_priv(sch); fq_init() 764 sch->limit = 10000; fq_init() 766 q->quantum = 2 * psched_mtu(qdisc_dev(sch)); fq_init() 767 q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch)); fq_init() 777 qdisc_watchdog_init(&q->watchdog, sch); fq_init() 780 err = fq_change(sch, opt); fq_init() 782 err = fq_resize(sch, q->fq_trees_log); fq_init() 787 static int fq_dump(struct Qdisc *sch, struct sk_buff *skb) fq_dump() argument 789 struct fq_sched_data *q = qdisc_priv(sch); fq_dump() 798 if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) || fq_dump() 816 static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d) fq_dump_stats() argument 818 struct fq_sched_data *q = qdisc_priv(sch); fq_dump_stats()
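fq_dequeue() above paces each flow by keeping a per-flow transmit horizon and skipping the flow until the horizon has passed. A model of the charging step (names assumed; the kernel also caps how much can be sent per round via the quantum):

#include <stdint.h>

/* FQ pacing sketch: sending len bytes at rate bytes/sec pushes the
 * flow's transmit horizon forward by len/rate; fq_dequeue() throttles
 * the flow until that time arrives. */
struct fq_flow_model {
    uint64_t t_next_ns;          /* earliest next transmit time */
    uint64_t rate_Bps;           /* pacing rate, bytes per second */
};

void fq_charge(struct fq_flow_model *f, uint64_t now_ns, uint32_t len)
{
    if (f->t_next_ns < now_ns)
        f->t_next_ns = now_ns;   /* never schedule in the past */
    if (f->rate_Bps)
        f->t_next_ns += (uint64_t)len * 1000000000ull / f->rate_Bps;
}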
|
H A D | sch_blackhole.c | 20 static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch) blackhole_enqueue() argument 22 qdisc_drop(skb, sch); blackhole_enqueue() 26 static struct sk_buff *blackhole_dequeue(struct Qdisc *sch) blackhole_dequeue() argument
|
H A D | sch_netem.c | 72 /* internal t(ime)fifo qdisc uses t_root and sch->limit */ 362 static void tfifo_reset(struct Qdisc *sch) tfifo_reset() argument 364 struct netem_sched_data *q = qdisc_priv(sch); tfifo_reset() 377 static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) tfifo_enqueue() argument 379 struct netem_sched_data *q = qdisc_priv(sch); tfifo_enqueue() 395 sch->q.qlen++; tfifo_enqueue() 402 static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch) netem_segment() argument 410 qdisc_reshape_fail(skb, sch); netem_segment() 423 static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) netem_enqueue() argument 425 struct netem_sched_data *q = qdisc_priv(sch); netem_enqueue() 442 qdisc_qstats_drop(sch); /* mark packet */ netem_enqueue() 447 qdisc_qstats_drop(sch); netem_enqueue() 464 struct Qdisc *rootq = qdisc_root(sch); netem_enqueue() 480 segs = netem_segment(skb, sch); netem_enqueue() 493 rc = qdisc_drop(skb, sch); netem_enqueue() 501 if (unlikely(skb_queue_len(&sch->q) >= sch->limit)) netem_enqueue() 502 return qdisc_reshape_fail(skb, sch); netem_enqueue() 504 qdisc_qstats_backlog_inc(sch, skb); netem_enqueue() 521 if (!skb_queue_empty(&sch->q)) netem_enqueue() 522 last = skb_peek_tail(&sch->q); netem_enqueue() 542 tfifo_enqueue(skb, sch); netem_enqueue() 551 __skb_queue_head(&sch->q, skb); netem_enqueue() 552 sch->qstats.requeues++; netem_enqueue() 562 rc = qdisc_enqueue(segs, sch); netem_enqueue() 565 qdisc_qstats_drop(sch); netem_enqueue() 572 sch->q.qlen += nb; netem_enqueue() 574 qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len); netem_enqueue() 579 static unsigned int netem_drop(struct Qdisc *sch) netem_drop() argument 581 struct netem_sched_data *q = qdisc_priv(sch); netem_drop() 584 len = qdisc_queue_drop(sch); netem_drop() 593 sch->q.qlen--; netem_drop() 596 qdisc_qstats_backlog_dec(sch, skb); netem_drop() 603 qdisc_qstats_drop(sch); netem_drop() 608 static struct sk_buff *netem_dequeue(struct Qdisc *sch) netem_dequeue() argument 610 struct netem_sched_data *q = qdisc_priv(sch); netem_dequeue() 614 if (qdisc_is_throttled(sch)) netem_dequeue() 618 skb = __skb_dequeue(&sch->q); netem_dequeue() 620 qdisc_qstats_backlog_dec(sch, skb); netem_dequeue() 622 qdisc_unthrottled(sch); netem_dequeue() 623 qdisc_bstats_update(sch, skb); netem_dequeue() 637 sch->q.qlen--; netem_dequeue() 638 qdisc_qstats_backlog_dec(sch, skb); netem_dequeue() 657 qdisc_qstats_drop(sch); netem_dequeue() 658 qdisc_tree_reduce_backlog(sch, 1, netem_dequeue() 683 static void netem_reset(struct Qdisc *sch) netem_reset() argument 685 struct netem_sched_data *q = qdisc_priv(sch); netem_reset() 687 qdisc_reset_queue(sch); netem_reset() 688 tfifo_reset(sch); netem_reset() 703 static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr) get_dist_table() argument 705 struct netem_sched_data *q = qdisc_priv(sch); get_dist_table() 727 root_lock = qdisc_root_sleeping_lock(sch); get_dist_table() 859 static int netem_change(struct Qdisc *sch, struct nlattr *opt) netem_change() argument 861 struct netem_sched_data *q = qdisc_priv(sch); netem_change() 891 ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]); netem_change() 903 sch->limit = qopt->limit; netem_change() 941 static int netem_init(struct Qdisc *sch, struct nlattr *opt) netem_init() argument 943 struct netem_sched_data *q = qdisc_priv(sch); netem_init() 949 qdisc_watchdog_init(&q->watchdog, sch); netem_init() 952 ret = netem_change(sch, opt); netem_init() 958 static void netem_destroy(struct Qdisc *sch) 
netem_destroy() argument 960 struct netem_sched_data *q = qdisc_priv(sch); netem_destroy() 1018 static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) netem_dump() argument 1020 const struct netem_sched_data *q = qdisc_priv(sch); netem_dump() 1079 static int netem_dump_class(struct Qdisc *sch, unsigned long cl, netem_dump_class() argument 1082 struct netem_sched_data *q = qdisc_priv(sch); netem_dump_class() 1093 static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, netem_graft() argument 1096 struct netem_sched_data *q = qdisc_priv(sch); netem_graft() 1098 *old = qdisc_replace(sch, new, &q->qdisc); netem_graft() 1102 static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg) netem_leaf() argument 1104 struct netem_sched_data *q = qdisc_priv(sch); netem_leaf() 1108 static unsigned long netem_get(struct Qdisc *sch, u32 classid) netem_get() argument 1113 static void netem_put(struct Qdisc *sch, unsigned long arg) netem_put() argument 1117 static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker) netem_walk() argument 1121 if (walker->fn(sch, 1, walker) < 0) { netem_walk()
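netem_enqueue() above stamps each packet with a synthetic delay and tfifo_enqueue() keeps the queue ordered by release time. A sketch of the simplest delay sampler, uniform jitter around a base latency (assuming jitter_ns <= latency_ns so the subtraction cannot underflow; the kernel additionally supports correlated noise and the table-driven distributions loaded by get_dist_table()):

#include <stdint.h>
#include <stdlib.h>

/* Simplest netem delay sampler: uniform in
 * [latency - jitter, latency + jitter]. */
uint64_t netem_delay_ns(uint64_t latency_ns, uint64_t jitter_ns)
{
    uint64_t span;

    if (!jitter_ns)
        return latency_ns;
    span = 2 * jitter_ns + 1;
    return latency_ns - jitter_ns + (uint64_t)rand() % span;
}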
|
H A D | sch_cbq.c | 217 cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) cbq_classify() argument 219 struct cbq_sched_data *q = qdisc_priv(sch); cbq_classify() 230 if (TC_H_MAJ(prio ^ sch->handle) == 0 && cbq_classify() 369 cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch) cbq_enqueue() argument 371 struct cbq_sched_data *q = qdisc_priv(sch); cbq_enqueue() 373 struct cbq_class *cl = cbq_classify(skb, sch, &ret); cbq_enqueue() 380 qdisc_qstats_drop(sch); cbq_enqueue() 386 cl->q->__parent = sch; cbq_enqueue() 390 sch->q.qlen++; cbq_enqueue() 398 qdisc_qstats_drop(sch); cbq_enqueue() 591 struct Qdisc *sch = q->watchdog.qdisc; cbq_undelay() local 623 qdisc_unthrottled(sch); cbq_undelay() 624 __netif_schedule(qdisc_root(sch)); cbq_undelay() 631 struct Qdisc *sch = child->__parent; cbq_reshape_fail() local 632 struct cbq_sched_data *q = qdisc_priv(sch); cbq_reshape_fail() 643 cl->q->__parent = sch; cbq_reshape_fail() 647 sch->q.qlen++; cbq_reshape_fail() 653 qdisc_qstats_drop(sch); cbq_reshape_fail() 657 qdisc_qstats_drop(sch); cbq_reshape_fail() 822 cbq_dequeue_prio(struct Qdisc *sch, int prio) cbq_dequeue_prio() argument 824 struct cbq_sched_data *q = qdisc_priv(sch); cbq_dequeue_prio() 925 cbq_dequeue_1(struct Qdisc *sch) cbq_dequeue_1() argument 927 struct cbq_sched_data *q = qdisc_priv(sch); cbq_dequeue_1() 935 skb = cbq_dequeue_prio(sch, prio); cbq_dequeue_1() 943 cbq_dequeue(struct Qdisc *sch) cbq_dequeue() argument 946 struct cbq_sched_data *q = qdisc_priv(sch); cbq_dequeue() 959 skb = cbq_dequeue_1(sch); cbq_dequeue() 961 qdisc_bstats_update(sch, skb); cbq_dequeue() 962 sch->q.qlen--; cbq_dequeue() 963 qdisc_unthrottled(sch); cbq_dequeue() 997 if (sch->q.qlen) { cbq_dequeue() 998 qdisc_qstats_overlimit(sch); cbq_dequeue() 1169 static unsigned int cbq_drop(struct Qdisc *sch) cbq_drop() argument 1171 struct cbq_sched_data *q = qdisc_priv(sch); cbq_drop() 1184 sch->q.qlen--; cbq_drop() 1195 cbq_reset(struct Qdisc *sch) cbq_reset() argument 1197 struct cbq_sched_data *q = qdisc_priv(sch); cbq_reset() 1225 sch->q.qlen = 0; cbq_reset() 1343 static int cbq_init(struct Qdisc *sch, struct nlattr *opt) cbq_init() argument 1345 struct cbq_sched_data *q = qdisc_priv(sch); cbq_init() 1368 q->link.common.classid = sch->handle; cbq_init() 1369 q->link.qdisc = sch; cbq_init() 1370 q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, cbq_init() 1371 sch->handle); cbq_init() 1380 q->link.allot = psched_mtu(qdisc_dev(sch)); cbq_init() 1388 qdisc_watchdog_init(&q->watchdog, sch); cbq_init() 1538 static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb) cbq_dump() argument 1540 struct cbq_sched_data *q = qdisc_priv(sch); cbq_dump() 1556 cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d) cbq_dump_stats() argument 1558 struct cbq_sched_data *q = qdisc_priv(sch); cbq_dump_stats() 1565 cbq_dump_class(struct Qdisc *sch, unsigned long arg, cbq_dump_class() argument 1591 cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg, cbq_dump_class_stats() argument 1594 struct cbq_sched_data *q = qdisc_priv(sch); cbq_dump_class_stats() 1611 static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, cbq_graft() argument 1617 new = qdisc_create_dflt(sch->dev_queue, cbq_graft() 1628 *old = qdisc_replace(sch, new, &cl->q); cbq_graft() 1632 static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg) cbq_leaf() argument 1639 static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg) cbq_qlen_notify() argument 1647 static unsigned long cbq_get(struct Qdisc 
*sch, u32 classid) cbq_get() argument 1649 struct cbq_sched_data *q = qdisc_priv(sch); cbq_get() 1659 static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl) cbq_destroy_class() argument 1661 struct cbq_sched_data *q = qdisc_priv(sch); cbq_destroy_class() 1673 static void cbq_destroy(struct Qdisc *sch) cbq_destroy() argument 1675 struct cbq_sched_data *q = qdisc_priv(sch); cbq_destroy() 1695 cbq_destroy_class(sch, cl); cbq_destroy() 1700 static void cbq_put(struct Qdisc *sch, unsigned long arg) cbq_put() argument 1706 spinlock_t *root_lock = qdisc_root_sleeping_lock(sch); cbq_put() 1707 struct cbq_sched_data *q = qdisc_priv(sch); cbq_put() 1715 cbq_destroy_class(sch, cl); cbq_put() 1720 cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca, cbq_change_class() argument 1724 struct cbq_sched_data *q = qdisc_priv(sch); cbq_change_class() 1758 qdisc_root_sleeping_lock(sch), cbq_change_class() 1767 sch_tree_lock(sch); cbq_change_class() 1799 sch_tree_unlock(sch); cbq_change_class() 1817 if (TC_H_MAJ(classid ^ sch->handle) || cbq_change_class() 1822 classid = TC_H_MAKE(sch->handle, 0x8000); cbq_change_class() 1851 qdisc_root_sleeping_lock(sch), cbq_change_class() 1862 cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid); cbq_change_class() 1867 cl->qdisc = sch; cbq_change_class() 1872 sch_tree_lock(sch); cbq_change_class() 1896 sch_tree_unlock(sch); cbq_change_class() 1898 qdisc_class_hash_grow(sch, &q->clhash); cbq_change_class() 1908 static int cbq_delete(struct Qdisc *sch, unsigned long arg) cbq_delete() argument 1910 struct cbq_sched_data *q = qdisc_priv(sch); cbq_delete() 1917 sch_tree_lock(sch); cbq_delete() 1944 sch_tree_unlock(sch); cbq_delete() 1955 static struct tcf_proto __rcu **cbq_find_tcf(struct Qdisc *sch, cbq_find_tcf() argument 1958 struct cbq_sched_data *q = qdisc_priv(sch); cbq_find_tcf() 1967 static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent, cbq_bind_filter() argument 1970 struct cbq_sched_data *q = qdisc_priv(sch); cbq_bind_filter() 1983 static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg) cbq_unbind_filter() argument 1990 static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg) cbq_walk() argument 1992 struct cbq_sched_data *q = qdisc_priv(sch); cbq_walk() 2005 if (arg->fn(sch, (unsigned long)cl, arg) < 0) { cbq_walk()
|
H A D | sch_hfsc.c | 879 qdisc_peek_len(struct Qdisc *sch) qdisc_peek_len() argument 884 skb = sch->ops->peek(sch); qdisc_peek_len() 886 qdisc_warn_nonwc("qdisc_peek_len", sch); qdisc_peek_len() 895 hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl) hfsc_purge_queue() argument 921 hfsc_find_class(u32 classid, struct Qdisc *sch) hfsc_find_class() argument 923 struct hfsc_sched *q = qdisc_priv(sch); hfsc_find_class() 970 hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, hfsc_change_class() argument 973 struct hfsc_sched *q = qdisc_priv(sch); hfsc_change_class() 1018 spinlock_t *lock = qdisc_root_sleeping_lock(sch); hfsc_change_class() 1028 sch_tree_lock(sch); hfsc_change_class() 1042 sch_tree_unlock(sch); hfsc_change_class() 1052 parent = hfsc_find_class(parentid, sch); hfsc_change_class() 1057 if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0) hfsc_change_class() 1059 if (hfsc_find_class(classid, sch)) hfsc_change_class() 1071 qdisc_root_sleeping_lock(sch), hfsc_change_class() 1090 cl->qdisc = qdisc_create_dflt(sch->dev_queue, hfsc_change_class() 1098 sch_tree_lock(sch); hfsc_change_class() 1102 hfsc_purge_queue(sch, parent); hfsc_change_class() 1105 sch_tree_unlock(sch); hfsc_change_class() 1107 qdisc_class_hash_grow(sch, &q->clhash); hfsc_change_class() 1114 hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl) hfsc_destroy_class() argument 1116 struct hfsc_sched *q = qdisc_priv(sch); hfsc_destroy_class() 1126 hfsc_delete_class(struct Qdisc *sch, unsigned long arg) hfsc_delete_class() argument 1128 struct hfsc_sched *q = qdisc_priv(sch); hfsc_delete_class() 1134 sch_tree_lock(sch); hfsc_delete_class() 1139 hfsc_purge_queue(sch, cl); hfsc_delete_class() 1148 sch_tree_unlock(sch); hfsc_delete_class() 1153 hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) hfsc_classify() argument 1155 struct hfsc_sched *q = qdisc_priv(sch); hfsc_classify() 1161 if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 && hfsc_classify() 1162 (cl = hfsc_find_class(skb->priority, sch)) != NULL) hfsc_classify() 1181 cl = hfsc_find_class(res.classid, sch); hfsc_classify() 1197 cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch); hfsc_classify() 1205 hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, hfsc_graft_class() argument 1213 new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, hfsc_graft_class() 1219 *old = qdisc_replace(sch, new, &cl->qdisc); hfsc_graft_class() 1224 hfsc_class_leaf(struct Qdisc *sch, unsigned long arg) hfsc_class_leaf() argument 1235 hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg) hfsc_qlen_notify() argument 1246 hfsc_get_class(struct Qdisc *sch, u32 classid) hfsc_get_class() argument 1248 struct hfsc_class *cl = hfsc_find_class(classid, sch); hfsc_get_class() 1257 hfsc_put_class(struct Qdisc *sch, unsigned long arg) hfsc_put_class() argument 1262 hfsc_destroy_class(sch, cl); hfsc_put_class() 1266 hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid) hfsc_bind_tcf() argument 1269 struct hfsc_class *cl = hfsc_find_class(classid, sch); hfsc_bind_tcf() 1281 hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg) hfsc_unbind_tcf() argument 1289 hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg) hfsc_tcf_chain() argument 1291 struct hfsc_sched *q = qdisc_priv(sch); hfsc_tcf_chain() 1339 hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb, hfsc_dump_class() argument 1364 hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg, hfsc_dump_class_stats() argument 
1387 hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg) hfsc_walk() argument 1389 struct hfsc_sched *q = qdisc_priv(sch); hfsc_walk() 1403 if (arg->fn(sch, (unsigned long)cl, arg) < 0) { hfsc_walk() 1413 hfsc_schedule_watchdog(struct Qdisc *sch) hfsc_schedule_watchdog() argument 1415 struct hfsc_sched *q = qdisc_priv(sch); hfsc_schedule_watchdog() 1431 hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt) hfsc_init_qdisc() argument 1433 struct hfsc_sched *q = qdisc_priv(sch); hfsc_init_qdisc() 1448 q->root.cl_common.classid = sch->handle; hfsc_init_qdisc() 1451 q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, hfsc_init_qdisc() 1452 sch->handle); hfsc_init_qdisc() 1460 qdisc_class_hash_grow(sch, &q->clhash); hfsc_init_qdisc() 1462 qdisc_watchdog_init(&q->watchdog, sch); hfsc_init_qdisc() 1468 hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt) hfsc_change_qdisc() argument 1470 struct hfsc_sched *q = qdisc_priv(sch); hfsc_change_qdisc() 1477 sch_tree_lock(sch); hfsc_change_qdisc() 1479 sch_tree_unlock(sch); hfsc_change_qdisc() 1519 hfsc_reset_qdisc(struct Qdisc *sch) hfsc_reset_qdisc() argument 1521 struct hfsc_sched *q = qdisc_priv(sch); hfsc_reset_qdisc() 1532 sch->q.qlen = 0; hfsc_reset_qdisc() 1536 hfsc_destroy_qdisc(struct Qdisc *sch) hfsc_destroy_qdisc() argument 1538 struct hfsc_sched *q = qdisc_priv(sch); hfsc_destroy_qdisc() 1550 hfsc_destroy_class(sch, cl); hfsc_destroy_qdisc() 1557 hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb) hfsc_dump_qdisc() argument 1559 struct hfsc_sched *q = qdisc_priv(sch); hfsc_dump_qdisc() 1565 sch->qstats.backlog = 0; hfsc_dump_qdisc() 1568 sch->qstats.backlog += cl->qdisc->qstats.backlog; hfsc_dump_qdisc() 1582 hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch) hfsc_enqueue() argument 1587 cl = hfsc_classify(skb, sch, &err); hfsc_enqueue() 1590 qdisc_qstats_drop(sch); hfsc_enqueue() 1599 qdisc_qstats_drop(sch); hfsc_enqueue() 1607 sch->q.qlen++; hfsc_enqueue() 1613 hfsc_dequeue(struct Qdisc *sch) hfsc_dequeue() argument 1615 struct hfsc_sched *q = qdisc_priv(sch); hfsc_dequeue() 1622 if (sch->q.qlen == 0) hfsc_dequeue() 1642 qdisc_qstats_overlimit(sch); hfsc_dequeue() 1643 hfsc_schedule_watchdog(sch); hfsc_dequeue() 1673 qdisc_unthrottled(sch); hfsc_dequeue() 1674 qdisc_bstats_update(sch, skb); hfsc_dequeue() 1675 sch->q.qlen--; hfsc_dequeue() 1681 hfsc_drop(struct Qdisc *sch) hfsc_drop() argument 1683 struct hfsc_sched *q = qdisc_priv(sch); hfsc_drop() 1697 qdisc_qstats_drop(sch); hfsc_drop() 1698 sch->q.qlen--; hfsc_drop()
|
H A D | sch_teql.c | 80 teql_enqueue(struct sk_buff *skb, struct Qdisc *sch) teql_enqueue() argument 82 struct net_device *dev = qdisc_dev(sch); teql_enqueue() 83 struct teql_sched_data *q = qdisc_priv(sch); teql_enqueue() 90 return qdisc_drop(skb, sch); teql_enqueue() 94 teql_dequeue(struct Qdisc *sch) teql_dequeue() argument 96 struct teql_sched_data *dat = qdisc_priv(sch); teql_dequeue() 108 dat->m->slaves = sch; teql_dequeue() 112 qdisc_bstats_update(sch, skb); teql_dequeue() 114 sch->q.qlen = dat->q.qlen + q->q.qlen; teql_dequeue() 119 teql_peek(struct Qdisc *sch) teql_peek() argument 126 teql_reset(struct Qdisc *sch) teql_reset() argument 128 struct teql_sched_data *dat = qdisc_priv(sch); teql_reset() 131 sch->q.qlen = 0; teql_reset() 135 teql_destroy(struct Qdisc *sch) teql_destroy() argument 138 struct teql_sched_data *dat = qdisc_priv(sch); teql_destroy() 145 if (q == sch) { teql_destroy() 170 static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt) teql_qdisc_init() argument 172 struct net_device *dev = qdisc_dev(sch); teql_qdisc_init() 173 struct teql_master *m = (struct teql_master *)sch->ops; teql_qdisc_init() 174 struct teql_sched_data *q = qdisc_priv(sch); teql_qdisc_init() 207 NEXT_SLAVE(m->slaves) = sch; teql_qdisc_init() 209 q->next = sch; teql_qdisc_init() 210 m->slaves = sch; teql_qdisc_init()
|
H A D | sch_htb.c | 181 static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch) htb_find() argument 183 struct htb_sched *q = qdisc_priv(sch); htb_find() 206 static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, htb_classify() argument 209 struct htb_sched *q = qdisc_priv(sch); htb_classify() 219 if (skb->priority == sch->handle) htb_classify() 221 cl = htb_find(skb->priority, sch); htb_classify() 244 if (res.classid == sch->handle) htb_classify() 246 cl = htb_find(res.classid, sch); htb_classify() 257 cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch); htb_classify() 572 static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch) htb_enqueue() argument 575 struct htb_sched *q = qdisc_priv(sch); htb_enqueue() 576 struct htb_class *cl = htb_classify(skb, sch, &ret); htb_enqueue() 584 return qdisc_drop(skb, sch); htb_enqueue() 589 qdisc_qstats_drop(sch); htb_enqueue() 595 qdisc_qstats_drop(sch); htb_enqueue() 603 qdisc_qstats_backlog_inc(sch, skb); htb_enqueue() 604 sch->q.qlen++; htb_enqueue() 879 static struct sk_buff *htb_dequeue(struct Qdisc *sch) htb_dequeue() argument 882 struct htb_sched *q = qdisc_priv(sch); htb_dequeue() 891 qdisc_bstats_update(sch, skb); htb_dequeue() 892 qdisc_unthrottled(sch); htb_dequeue() 893 qdisc_qstats_backlog_dec(sch, skb); htb_dequeue() 894 sch->q.qlen--; htb_dequeue() 898 if (!sch->q.qlen) htb_dequeue() 930 qdisc_qstats_overlimit(sch); htb_dequeue() 947 static unsigned int htb_drop(struct Qdisc *sch) htb_drop() argument 949 struct htb_sched *q = qdisc_priv(sch); htb_drop() 960 sch->qstats.backlog -= len; htb_drop() 961 sch->q.qlen--; htb_drop() 973 static void htb_reset(struct Qdisc *sch) htb_reset() argument 975 struct htb_sched *q = qdisc_priv(sch); htb_reset() 994 sch->q.qlen = 0; htb_reset() 995 sch->qstats.backlog = 0; htb_reset() 1015 struct Qdisc *sch = q->watchdog.qdisc; htb_work_func() local 1017 __netif_schedule(qdisc_root(sch)); htb_work_func() 1020 static int htb_init(struct Qdisc *sch, struct nlattr *opt) htb_init() argument 1022 struct htb_sched *q = qdisc_priv(sch); htb_init() 1048 qdisc_watchdog_init(&q->watchdog, sch); htb_init() 1055 q->direct_qlen = qdisc_dev(sch)->tx_queue_len; htb_init() 1064 static int htb_dump(struct Qdisc *sch, struct sk_buff *skb) htb_dump() argument 1066 struct htb_sched *q = qdisc_priv(sch); htb_dump() 1094 static int htb_dump_class(struct Qdisc *sch, unsigned long arg, htb_dump_class() argument 1139 htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d) htb_dump_class_stats() argument 1157 static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, htb_graft() argument 1165 (new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, htb_graft() 1169 *old = qdisc_replace(sch, new, &cl->un.leaf.q); htb_graft() 1173 static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg) htb_leaf() argument 1179 static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg) htb_qlen_notify() argument 1184 htb_deactivate(qdisc_priv(sch), cl); htb_qlen_notify() 1187 static unsigned long htb_get(struct Qdisc *sch, u32 classid) htb_get() argument 1189 struct htb_class *cl = htb_find(classid, sch); htb_get() 1227 static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl) htb_destroy_class() argument 1238 static void htb_destroy(struct Qdisc *sch) htb_destroy() argument 1240 struct htb_sched *q = qdisc_priv(sch); htb_destroy() 1261 htb_destroy_class(sch, cl); htb_destroy() 1267 static int htb_delete(struct Qdisc 
*sch, unsigned long arg) htb_delete() argument 1269 struct htb_sched *q = qdisc_priv(sch); htb_delete() 1282 new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, htb_delete() 1287 sch_tree_lock(sch); htb_delete() 1318 sch_tree_unlock(sch); htb_delete() 1322 static void htb_put(struct Qdisc *sch, unsigned long arg) htb_put() argument 1327 htb_destroy_class(sch, cl); htb_put() 1330 static int htb_change_class(struct Qdisc *sch, u32 classid, htb_change_class() argument 1335 struct htb_sched *q = qdisc_priv(sch); htb_change_class() 1354 parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch); htb_change_class() 1386 if (!classid || TC_H_MAJ(classid ^ sch->handle) || htb_change_class() 1387 htb_find(classid, sch)) htb_change_class() 1403 qdisc_root_sleeping_lock(sch), htb_change_class() 1423 new_q = qdisc_create_dflt(sch->dev_queue, htb_change_class() 1425 sch_tree_lock(sch); htb_change_class() 1465 spinlock_t *lock = qdisc_root_sleeping_lock(sch); htb_change_class() 1474 sch_tree_lock(sch); htb_change_class() 1512 sch_tree_unlock(sch); htb_change_class() 1514 qdisc_class_hash_grow(sch, &q->clhash); htb_change_class() 1523 static struct tcf_proto __rcu **htb_find_tcf(struct Qdisc *sch, htb_find_tcf() argument 1526 struct htb_sched *q = qdisc_priv(sch); htb_find_tcf() 1533 static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent, htb_bind_filter() argument 1536 struct htb_class *cl = htb_find(classid, sch); htb_bind_filter() 1552 static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg) htb_unbind_filter() argument 1560 static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg) htb_walk() argument 1562 struct htb_sched *q = qdisc_priv(sch); htb_walk() 1575 if (arg->fn(sch, (unsigned long)cl, arg) < 0) { htb_walk()
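htb_dequeue() above decides per class whether it may send from its own rate, must borrow from an ancestor, or is throttled, based on two token counters (one for the assured rate, one for the ceil). A compact model of that three-way mode decision:

#include <stdint.h>

/* HTB's class mode from two token counters: tokens tracks the
 * assured rate, ctokens tracks the ceil; negative means the
 * respective budget is exhausted. */
enum htb_mode { HTB_CANT_SEND, HTB_MAY_BORROW, HTB_CAN_SEND };

enum htb_mode htb_class_mode(int64_t tokens, int64_t ctokens)
{
    if (ctokens < 0)
        return HTB_CANT_SEND;    /* above ceil: throttled */
    if (tokens < 0)
        return HTB_MAY_BORROW;   /* above rate, below ceil: borrow */
    return HTB_CAN_SEND;         /* within the assured rate */
}

The kernel version also applies a small hysteresis margin so classes do not flap between modes on every packet.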
|
H A D | sch_qfq.c | 209 static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid) qfq_find_class() argument 211 struct qfq_sched *q = qdisc_priv(sch); qfq_find_class() 386 static int qfq_change_agg(struct Qdisc *sch, struct qfq_class *cl, u32 weight, qfq_change_agg() argument 389 struct qfq_sched *q = qdisc_priv(sch); qfq_change_agg() 404 static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, qfq_change_class() argument 407 struct qfq_sched *q = qdisc_priv(sch); qfq_change_class() 441 lmax = psched_mtu(qdisc_dev(sch)); qfq_change_class() 463 qdisc_root_sleeping_lock(sch), qfq_change_class() 481 cl->qdisc = qdisc_create_dflt(sch->dev_queue, qfq_change_class() 489 qdisc_root_sleeping_lock(sch), qfq_change_class() 495 sch_tree_lock(sch); qfq_change_class() 497 sch_tree_unlock(sch); qfq_change_class() 499 qdisc_class_hash_grow(sch, &q->clhash); qfq_change_class() 502 sch_tree_lock(sch); qfq_change_class() 505 sch_tree_unlock(sch); qfq_change_class() 512 sch_tree_lock(sch); qfq_change_class() 518 sch_tree_unlock(sch); qfq_change_class() 529 static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl) qfq_destroy_class() argument 531 struct qfq_sched *q = qdisc_priv(sch); qfq_destroy_class() 539 static int qfq_delete_class(struct Qdisc *sch, unsigned long arg) qfq_delete_class() argument 541 struct qfq_sched *q = qdisc_priv(sch); qfq_delete_class() 547 sch_tree_lock(sch); qfq_delete_class() 558 sch_tree_unlock(sch); qfq_delete_class() 562 static unsigned long qfq_get_class(struct Qdisc *sch, u32 classid) qfq_get_class() argument 564 struct qfq_class *cl = qfq_find_class(sch, classid); qfq_get_class() 572 static void qfq_put_class(struct Qdisc *sch, unsigned long arg) qfq_put_class() argument 577 qfq_destroy_class(sch, cl); qfq_put_class() 580 static struct tcf_proto __rcu **qfq_tcf_chain(struct Qdisc *sch, qfq_tcf_chain() argument 583 struct qfq_sched *q = qdisc_priv(sch); qfq_tcf_chain() 591 static unsigned long qfq_bind_tcf(struct Qdisc *sch, unsigned long parent, qfq_bind_tcf() argument 594 struct qfq_class *cl = qfq_find_class(sch, classid); qfq_bind_tcf() 602 static void qfq_unbind_tcf(struct Qdisc *sch, unsigned long arg) qfq_unbind_tcf() argument 609 static int qfq_graft_class(struct Qdisc *sch, unsigned long arg, qfq_graft_class() argument 615 new = qdisc_create_dflt(sch->dev_queue, qfq_graft_class() 621 *old = qdisc_replace(sch, new, &cl->qdisc); qfq_graft_class() 625 static struct Qdisc *qfq_class_leaf(struct Qdisc *sch, unsigned long arg) qfq_class_leaf() argument 632 static int qfq_dump_class(struct Qdisc *sch, unsigned long arg, qfq_dump_class() argument 655 static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg, qfq_dump_class_stats() argument 675 static void qfq_walk(struct Qdisc *sch, struct qdisc_walker *arg) qfq_walk() argument 677 struct qfq_sched *q = qdisc_priv(sch); qfq_walk() 690 if (arg->fn(sch, (unsigned long)cl, arg) < 0) { qfq_walk() 699 static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch, qfq_classify() argument 702 struct qfq_sched *q = qdisc_priv(sch); qfq_classify() 708 if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) { qfq_classify() 710 cl = qfq_find_class(sch, skb->priority); qfq_classify() 730 cl = qfq_find_class(sch, res.classid); qfq_classify() 1098 static struct sk_buff *qfq_dequeue(struct Qdisc *sch) qfq_dequeue() argument 1100 struct qfq_sched *q = qdisc_priv(sch); qfq_dequeue() 1138 } else if (sch->q.qlen == 0) { /* no aggregate to serve */ qfq_dequeue() 1153 sch->q.qlen--; 
qfq_dequeue() 1154 qdisc_bstats_update(sch, skb); qfq_dequeue() 1217 static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) qfq_enqueue() argument 1219 struct qfq_sched *q = qdisc_priv(sch); qfq_enqueue() 1224 cl = qfq_classify(skb, sch, &err); qfq_enqueue() 1227 qdisc_qstats_drop(sch); qfq_enqueue() 1236 err = qfq_change_agg(sch, cl, cl->agg->class_weight, qfq_enqueue() 1247 qdisc_qstats_drop(sch); qfq_enqueue() 1253 ++sch->q.qlen; qfq_enqueue() 1414 static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg) qfq_qlen_notify() argument 1416 struct qfq_sched *q = qdisc_priv(sch); qfq_qlen_notify() 1448 static unsigned int qfq_drop(struct Qdisc *sch) qfq_drop() argument 1450 struct qfq_sched *q = qdisc_priv(sch); qfq_drop() 1459 sch->q.qlen--; qfq_drop() 1469 static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt) qfq_init_qdisc() argument 1471 struct qfq_sched *q = qdisc_priv(sch); qfq_init_qdisc() 1480 if (qdisc_dev(sch)->tx_queue_len + 1 > QFQ_MAX_AGG_CLASSES) qfq_init_qdisc() 1483 max_classes = qdisc_dev(sch)->tx_queue_len + 1; qfq_init_qdisc() 1505 static void qfq_reset_qdisc(struct Qdisc *sch) qfq_reset_qdisc() argument 1507 struct qfq_sched *q = qdisc_priv(sch); qfq_reset_qdisc() 1519 sch->q.qlen = 0; qfq_reset_qdisc() 1522 static void qfq_destroy_qdisc(struct Qdisc *sch) qfq_destroy_qdisc() argument 1524 struct qfq_sched *q = qdisc_priv(sch); qfq_destroy_qdisc() 1534 qfq_destroy_class(sch, cl); qfq_destroy_qdisc()
|
H A D | sch_api.c | 651 void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash) qdisc_class_hash_grow() argument 671 sch_tree_lock(sch); qdisc_class_hash_grow() 681 sch_tree_unlock(sch); qdisc_class_hash_grow() 747 void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n, qdisc_tree_reduce_backlog() argument 759 while ((parentid = sch->parent)) { qdisc_tree_reduce_backlog() 763 if (sch->flags & TCQ_F_NOPARENT) qdisc_tree_reduce_backlog() 766 sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid)); qdisc_tree_reduce_backlog() 767 if (sch == NULL) { qdisc_tree_reduce_backlog() 771 cops = sch->ops->cl_ops; qdisc_tree_reduce_backlog() 773 cl = cops->get(sch, parentid); qdisc_tree_reduce_backlog() 774 cops->qlen_notify(sch, cl); qdisc_tree_reduce_backlog() 775 cops->put(sch, cl); qdisc_tree_reduce_backlog() 777 sch->q.qlen -= n; qdisc_tree_reduce_backlog() 778 sch->qstats.backlog -= len; qdisc_tree_reduce_backlog() 779 __qdisc_qstats_drop(sch, drops); qdisc_tree_reduce_backlog() 897 struct Qdisc *sch; qdisc_create() local 934 sch = qdisc_alloc(dev_queue, ops); qdisc_create() 935 if (IS_ERR(sch)) { qdisc_create() 936 err = PTR_ERR(sch); qdisc_create() 940 sch->parent = parent; qdisc_create() 943 sch->flags |= TCQ_F_INGRESS; qdisc_create() 945 lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock); qdisc_create() 953 lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock); qdisc_create() 955 sch->flags |= TCQ_F_ONETXQUEUE; qdisc_create() 958 sch->handle = handle; qdisc_create() 960 if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) { qdisc_create() 961 if (qdisc_is_percpu_stats(sch)) { qdisc_create() 962 sch->cpu_bstats = qdisc_create() 964 if (!sch->cpu_bstats) qdisc_create() 967 sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue); qdisc_create() 968 if (!sch->cpu_qstats) qdisc_create() 978 rcu_assign_pointer(sch->stab, stab); qdisc_create() 984 if (sch->flags & TCQ_F_MQROOT) qdisc_create() 987 if ((sch->parent != TC_H_ROOT) && qdisc_create() 988 !(sch->flags & TCQ_F_INGRESS) && qdisc_create() 990 root_lock = qdisc_root_sleeping_lock(sch); qdisc_create() 992 root_lock = qdisc_lock(sch); qdisc_create() 994 err = gen_new_estimator(&sch->bstats, qdisc_create() 995 sch->cpu_bstats, qdisc_create() 996 &sch->rate_est, qdisc_create() 1003 qdisc_list_add(sch); qdisc_create() 1005 return sch; qdisc_create() 1009 kfree((char *) sch - sch->padded); qdisc_create() 1017 free_percpu(sch->cpu_bstats); qdisc_create() 1018 free_percpu(sch->cpu_qstats); qdisc_create() 1023 qdisc_put_stab(rtnl_dereference(sch->stab)); qdisc_create() 1025 ops->destroy(sch); qdisc_create() 1029 static int qdisc_change(struct Qdisc *sch, struct nlattr **tca) qdisc_change() argument 1035 if (sch->ops->change == NULL) qdisc_change() 1037 err = sch->ops->change(sch, tca[TCA_OPTIONS]); qdisc_change() 1048 ostab = rtnl_dereference(sch->stab); qdisc_change() 1049 rcu_assign_pointer(sch->stab, stab); qdisc_change() 1055 if (sch->flags & TCQ_F_MQROOT) qdisc_change() 1057 gen_replace_estimator(&sch->bstats, qdisc_change() 1058 sch->cpu_bstats, qdisc_change() 1059 &sch->rate_est, qdisc_change() 1060 qdisc_root_sleeping_lock(sch), qdisc_change()
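qdisc_tree_reduce_backlog() above walks from a qdisc up through its parents, shrinking qlen and backlog at each level and giving classful parents a qlen_notify() callback so they can deactivate a now-empty class. A structural model of that walk (the kernel resolves each parent by handle via qdisc_lookup() rather than a pointer, and stops early for TCQ_F_NOPARENT qdiscs):

#include <stdint.h>
#include <stddef.h>

/* After an inner qdisc drops n packets / len bytes, every ancestor
 * must shrink its counters too. */
struct q_node {
    struct q_node *parent;                 /* NULL at the root */
    uint32_t qlen;
    uint32_t backlog;
    void (*qlen_notify)(struct q_node *);  /* NULL for classless */
};

void tree_reduce_backlog(struct q_node *q, uint32_t n, uint32_t len)
{
    for (q = q->parent; q; q = q->parent) {
        if (q->qlen_notify)
            q->qlen_notify(q);   /* let the class deactivate if empty */
        q->qlen -= n;
        q->backlog -= len;
    }
}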
|
H A D | sch_generic.c | 580 struct Qdisc *sch; qdisc_alloc() local 581 unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size; qdisc_alloc() 590 sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p); qdisc_alloc() 592 if (sch != p) { qdisc_alloc() 598 sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p); qdisc_alloc() 599 sch->padded = (char *) sch - (char *) p; qdisc_alloc() 601 INIT_LIST_HEAD(&sch->list); qdisc_alloc() 602 skb_queue_head_init(&sch->q); qdisc_alloc() 604 spin_lock_init(&sch->busylock); qdisc_alloc() 605 lockdep_set_class(&sch->busylock, qdisc_alloc() 608 sch->ops = ops; qdisc_alloc() 609 sch->enqueue = ops->enqueue; qdisc_alloc() 610 sch->dequeue = ops->dequeue; qdisc_alloc() 611 sch->dev_queue = dev_queue; qdisc_alloc() 613 atomic_set(&sch->refcnt, 1); qdisc_alloc() 615 return sch; qdisc_alloc() 624 struct Qdisc *sch; qdisc_create_dflt() local 629 sch = qdisc_alloc(dev_queue, ops); qdisc_create_dflt() 630 if (IS_ERR(sch)) qdisc_create_dflt() 632 sch->parent = parentid; qdisc_create_dflt() 634 if (!ops->init || ops->init(sch, NULL) == 0) qdisc_create_dflt() 635 return sch; qdisc_create_dflt() 637 qdisc_destroy(sch); qdisc_create_dflt()
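qdisc_alloc() above cache-aligns the Qdisc inside a larger allocation and records the padding so the original pointer can be freed later (cf. sch->padded and the kfree((char *)sch - sch->padded) in qdisc_create()). A userspace sketch of the same trick, with the alignment assumed to be a 64-byte cache line:

#include <stdint.h>
#include <stdlib.h>

#define ALIGNTO     64u
#define ALIGN_UP(x) (((x) + ALIGNTO - 1) & ~(uintptr_t)(ALIGNTO - 1))

struct obj {
    size_t padded;    /* bytes of leading padding */
    /* ... payload ... */
};

struct obj *obj_alloc(size_t priv_size)
{
    /* over-allocate so the object can be rounded up to a boundary */
    void *p = calloc(1, ALIGN_UP(sizeof(struct obj)) + priv_size + ALIGNTO);
    struct obj *o;

    if (!p)
        return NULL;
    o = (struct obj *)ALIGN_UP((uintptr_t)p);
    o->padded = (size_t)((char *)o - (char *)p);
    return o;         /* release with free((char *)o - o->padded) */
}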
|
/linux-4.4.14/drivers/gpio/ |
H A D | gpio-sch.c | 46 static unsigned sch_gpio_offset(struct sch_gpio *sch, unsigned gpio, sch_gpio_offset() argument 51 if (gpio >= sch->resume_base) { sch_gpio_offset() 52 gpio -= sch->resume_base; sch_gpio_offset() 59 static unsigned sch_gpio_bit(struct sch_gpio *sch, unsigned gpio) sch_gpio_bit() argument 61 if (gpio >= sch->resume_base) sch_gpio_bit() 62 gpio -= sch->resume_base; sch_gpio_bit() 68 struct sch_gpio *sch = to_sch_gpio(gc); sch_gpio_reg_get() local 72 offset = sch_gpio_offset(sch, gpio, reg); sch_gpio_reg_get() 73 bit = sch_gpio_bit(sch, gpio); sch_gpio_reg_get() 75 reg_val = !!(inb(sch->iobase + offset) & BIT(bit)); sch_gpio_reg_get() 83 struct sch_gpio *sch = to_sch_gpio(gc); sch_gpio_reg_set() local 87 offset = sch_gpio_offset(sch, gpio, reg); sch_gpio_reg_set() 88 bit = sch_gpio_bit(sch, gpio); sch_gpio_reg_set() 90 reg_val = inb(sch->iobase + offset); sch_gpio_reg_set() 93 outb(reg_val | BIT(bit), sch->iobase + offset); sch_gpio_reg_set() 95 outb((reg_val & ~BIT(bit)), sch->iobase + offset); sch_gpio_reg_set() 100 struct sch_gpio *sch = to_sch_gpio(gc); sch_gpio_direction_in() local 102 spin_lock(&sch->lock); sch_gpio_direction_in() 104 spin_unlock(&sch->lock); sch_gpio_direction_in() 115 struct sch_gpio *sch = to_sch_gpio(gc); sch_gpio_set() local 117 spin_lock(&sch->lock); sch_gpio_set() 119 spin_unlock(&sch->lock); sch_gpio_set() 125 struct sch_gpio *sch = to_sch_gpio(gc); sch_gpio_direction_out() local 127 spin_lock(&sch->lock); sch_gpio_direction_out() 129 spin_unlock(&sch->lock); sch_gpio_direction_out() 155 struct sch_gpio *sch; sch_gpio_probe() local 158 sch = devm_kzalloc(&pdev->dev, sizeof(*sch), GFP_KERNEL); sch_gpio_probe() 159 if (!sch) sch_gpio_probe() 170 spin_lock_init(&sch->lock); sch_gpio_probe() 171 sch->iobase = res->start; sch_gpio_probe() 172 sch->chip = sch_gpio_chip; sch_gpio_probe() 173 sch->chip.label = dev_name(&pdev->dev); sch_gpio_probe() 174 sch->chip.dev = &pdev->dev; sch_gpio_probe() 178 sch->core_base = 0; sch_gpio_probe() 179 sch->resume_base = 10; sch_gpio_probe() 180 sch->chip.ngpio = 14; sch_gpio_probe() 187 sch_gpio_reg_set(&sch->chip, 8, GEN, 1); sch_gpio_probe() 188 sch_gpio_reg_set(&sch->chip, 9, GEN, 1); sch_gpio_probe() 193 sch_gpio_reg_set(&sch->chip, 13, GEN, 1); sch_gpio_probe() 197 sch->core_base = 0; sch_gpio_probe() 198 sch->resume_base = 5; sch_gpio_probe() 199 sch->chip.ngpio = 14; sch_gpio_probe() 203 sch->core_base = 0; sch_gpio_probe() 204 sch->resume_base = 21; sch_gpio_probe() 205 sch->chip.ngpio = 30; sch_gpio_probe() 209 sch->core_base = 0; sch_gpio_probe() 210 sch->resume_base = 2; sch_gpio_probe() 211 sch->chip.ngpio = 8; sch_gpio_probe() 218 platform_set_drvdata(pdev, sch); sch_gpio_probe() 220 return gpiochip_add(&sch->chip); sch_gpio_probe() 225 struct sch_gpio *sch = platform_get_drvdata(pdev); sch_gpio_remove() local 227 gpiochip_remove(&sch->chip); sch_gpio_remove()
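sch_gpio_offset()/sch_gpio_bit() above split the pin space into a core well and a resume well and pack eight GPIOs per register byte. A model of that address math; the bank offsets here are placeholders, not the real SCH register map:

#include <stdint.h>

/* Pins below resume_base belong to the core well, the rest to the
 * resume well; within a bank, pin N maps to byte N/8, bit N%8. */
struct sch_gpio_model {
    uint16_t iobase;        /* I/O port base */
    unsigned resume_base;   /* first GPIO of the resume well */
    uint16_t core_bank;     /* assumed offset of the core-well bank */
    uint16_t resume_bank;   /* assumed offset of the resume-well bank */
};

void sch_gpio_addr(const struct sch_gpio_model *s, unsigned gpio,
                   uint16_t *offset, uint8_t *bit)
{
    uint16_t bank = s->core_bank;

    if (gpio >= s->resume_base) {    /* resume-well pin */
        gpio -= s->resume_base;
        bank = s->resume_bank;
    }
    *offset = s->iobase + bank + gpio / 8;
    *bit = gpio % 8;
}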
|
/linux-4.4.14/include/net/ |
H A D | sch_generic.h | 494 const struct Qdisc *sch) qdisc_calculate_pkt_len() 497 struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab); qdisc_calculate_pkt_len() 504 static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch) qdisc_enqueue() argument 506 qdisc_calculate_pkt_len(skb, sch); qdisc_enqueue() 507 return sch->enqueue(skb, sch); qdisc_enqueue() 530 static inline void qdisc_bstats_cpu_update(struct Qdisc *sch, qdisc_bstats_cpu_update() argument 533 bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb); qdisc_bstats_cpu_update() 536 static inline void qdisc_bstats_update(struct Qdisc *sch, qdisc_bstats_update() argument 539 bstats_update(&sch->bstats, skb); qdisc_bstats_update() 542 static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch, qdisc_qstats_backlog_dec() argument 545 sch->qstats.backlog -= qdisc_pkt_len(skb); qdisc_qstats_backlog_dec() 548 static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch, qdisc_qstats_backlog_inc() argument 551 sch->qstats.backlog += qdisc_pkt_len(skb); qdisc_qstats_backlog_inc() 554 static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count) __qdisc_qstats_drop() argument 556 sch->qstats.drops += count; __qdisc_qstats_drop() 569 static inline void qdisc_qstats_drop(struct Qdisc *sch) qdisc_qstats_drop() argument 571 qstats_drop_inc(&sch->qstats); qdisc_qstats_drop() 574 static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch) qdisc_qstats_cpu_drop() argument 576 qstats_drop_inc(this_cpu_ptr(sch->cpu_qstats)); qdisc_qstats_cpu_drop() 579 static inline void qdisc_qstats_overlimit(struct Qdisc *sch) qdisc_qstats_overlimit() argument 581 sch->qstats.overlimits++; qdisc_qstats_overlimit() 584 static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch, __qdisc_enqueue_tail() argument 588 qdisc_qstats_backlog_inc(sch, skb); __qdisc_enqueue_tail() 593 static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch) qdisc_enqueue_tail() argument 595 return __qdisc_enqueue_tail(skb, sch, &sch->q); qdisc_enqueue_tail() 598 static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch, __qdisc_dequeue_head() argument 604 qdisc_qstats_backlog_dec(sch, skb); __qdisc_dequeue_head() 605 qdisc_bstats_update(sch, skb); __qdisc_dequeue_head() 611 static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch) qdisc_dequeue_head() argument 613 return __qdisc_dequeue_head(sch, &sch->q); qdisc_dequeue_head() 616 static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch, __qdisc_queue_drop_head() argument 623 qdisc_qstats_backlog_dec(sch, skb); __qdisc_queue_drop_head() 631 static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch) qdisc_queue_drop_head() argument 633 return __qdisc_queue_drop_head(sch, &sch->q); qdisc_queue_drop_head() 636 static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch, __qdisc_dequeue_tail() argument 642 qdisc_qstats_backlog_dec(sch, skb); __qdisc_dequeue_tail() 647 static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch) qdisc_dequeue_tail() argument 649 return __qdisc_dequeue_tail(sch, &sch->q); qdisc_dequeue_tail() 652 static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch) qdisc_peek_head() argument 654 return skb_peek(&sch->q); qdisc_peek_head() 658 static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch) qdisc_peek_dequeued() argument 661 if (!sch->gso_skb) { qdisc_peek_dequeued() 662 sch->gso_skb = sch->dequeue(sch); qdisc_peek_dequeued() 663 if (sch->gso_skb) qdisc_peek_dequeued() 
665 sch->q.qlen++; qdisc_peek_dequeued() 668 return sch->gso_skb; qdisc_peek_dequeued() 672 static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch) qdisc_dequeue_peeked() argument 674 struct sk_buff *skb = sch->gso_skb; qdisc_dequeue_peeked() 677 sch->gso_skb = NULL; qdisc_dequeue_peeked() 678 sch->q.qlen--; qdisc_dequeue_peeked() 680 skb = sch->dequeue(sch); qdisc_dequeue_peeked() 686 static inline void __qdisc_reset_queue(struct Qdisc *sch, __qdisc_reset_queue() argument 696 static inline void qdisc_reset_queue(struct Qdisc *sch) qdisc_reset_queue() argument 698 __qdisc_reset_queue(sch, &sch->q); qdisc_reset_queue() 699 sch->qstats.backlog = 0; qdisc_reset_queue() 702 static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new, qdisc_replace() argument 707 sch_tree_lock(sch); qdisc_replace() 714 sch_tree_unlock(sch); qdisc_replace() 719 static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch, __qdisc_queue_drop() argument 722 struct sk_buff *skb = __qdisc_dequeue_tail(sch, list); __qdisc_queue_drop() 733 static inline unsigned int qdisc_queue_drop(struct Qdisc *sch) qdisc_queue_drop() argument 735 return __qdisc_queue_drop(sch, &sch->q); qdisc_queue_drop() 738 static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch) qdisc_drop() argument 741 qdisc_qstats_drop(sch); qdisc_drop() 746 static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch) qdisc_reshape_fail() argument 748 qdisc_qstats_drop(sch); qdisc_reshape_fail() 751 if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch)) qdisc_reshape_fail() 493 qdisc_calculate_pkt_len(struct sk_buff *skb, const struct Qdisc *sch) qdisc_calculate_pkt_len() argument
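qdisc_peek_dequeued()/qdisc_dequeue_peeked() above implement peek for qdiscs that can only really dequeue: the peeked packet is parked in gso_skb with qlen bumped so accounting stays consistent, and the next dequeue drains the parked packet before calling the real dequeue again. A minimal model:

#include <stddef.h>

struct pkt;    /* opaque stand-in for sk_buff */

struct q_model {
    struct pkt *gso_skb;                      /* parked packet, if any */
    unsigned    qlen;
    struct pkt *(*do_dequeue)(struct q_model *);
};

struct pkt *peek_dequeued(struct q_model *q)
{
    if (!q->gso_skb) {
        q->gso_skb = q->do_dequeue(q);
        if (q->gso_skb)
            q->qlen++;                        /* still counted as queued */
    }
    return q->gso_skb;
}

struct pkt *dequeue_peeked(struct q_model *q)
{
    struct pkt *p = q->gso_skb;

    if (p) {
        q->gso_skb = NULL;
        q->qlen--;
    } else {
        p = q->do_dequeue(q);
    }
    return p;
}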
|
H A D | codel.h | 180 const struct Qdisc *sch) codel_params_init() 184 params->mtu = psched_mtu(qdisc_dev(sch)); codel_params_init() 230 struct Qdisc *sch, codel_should_drop() 244 sch->qstats.backlog -= qdisc_pkt_len(skb); codel_should_drop() 250 sch->qstats.backlog <= params->mtu) { codel_should_drop() 268 struct Qdisc *sch); 270 static struct sk_buff *codel_dequeue(struct Qdisc *sch, codel_dequeue() argument 276 struct sk_buff *skb = dequeue_func(vars, sch); codel_dequeue() 285 drop = codel_should_drop(skb, sch, vars, params, stats, now); codel_dequeue() 314 qdisc_drop(skb, sch); codel_dequeue() 316 skb = dequeue_func(vars, sch); codel_dequeue() 317 if (!codel_should_drop(skb, sch, codel_dequeue() 337 qdisc_drop(skb, sch); codel_dequeue() 340 skb = dequeue_func(vars, sch); codel_dequeue() 341 drop = codel_should_drop(skb, sch, vars, params, codel_dequeue() 179 codel_params_init(struct codel_params *params, const struct Qdisc *sch) codel_params_init() argument 229 codel_should_drop(const struct sk_buff *skb, struct Qdisc *sch, struct codel_vars *vars, struct codel_params *params, struct codel_stats *stats, codel_time_t now) codel_should_drop() argument
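codel_dequeue() above applies CoDel's control law: while in the dropping state, the next drop is scheduled interval/sqrt(count) after the current one, so the drop rate ramps up gently until sojourn time falls back under target. The law in plain C (the kernel avoids floating point with a cached reciprocal square root):

#include <math.h>
#include <stdint.h>

/* CoDel control law; count >= 1 once the dropping state is entered. */
uint64_t codel_next_drop_ns(uint64_t now_ns, uint64_t interval_ns,
                            uint32_t count)
{
    return now_ns + (uint64_t)((double)interval_ns / sqrt((double)count));
}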
|
H A D | pkt_sched.h | 84 struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
|
/linux-4.4.14/net/netfilter/ |
H A D | xt_sctp.c | 45 const sctp_chunkhdr_t *sch; match_packet() local 59 sch = skb_header_pointer(skb, offset, sizeof(_sch), &_sch); match_packet() 60 if (sch == NULL || sch->length == 0) { match_packet() 68 ++i, offset, sch->type, htons(sch->length), match_packet() 69 sch->flags); match_packet() 71 offset += WORD_ROUND(ntohs(sch->length)); match_packet() 75 if (SCTP_CHUNKMAP_IS_SET(info->chunkmap, sch->type)) { match_packet() 79 sch->type, sch->flags)) { match_packet() 86 sch->type, sch->flags)) match_packet() 87 SCTP_CHUNKMAP_CLEAR(chunkmapcopy, sch->type); match_packet() 92 sch->type, sch->flags)) match_packet()
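
match_packet() above walks the SCTP chunk list: read one chunk header with skb_header_pointer(), act on its type and flags, then advance by the chunk length rounded up to a 4-byte boundary (WORD_ROUND). A self-contained user-space sketch of the same walk follows, with a simplified header struct and a zero-length guard; illustrative only.

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct sctp_chunkhdr_sketch {
            uint8_t  type;
            uint8_t  flags;
            uint16_t length;        /* network byte order, includes header */
    };

    static void walk_chunks(const uint8_t *buf, size_t len)
    {
            size_t offset = 0;

            while (offset + sizeof(struct sctp_chunkhdr_sketch) <= len) {
                    const struct sctp_chunkhdr_sketch *ch =
                            (const void *)(buf + offset);
                    uint16_t clen = ntohs(ch->length);

                    /* a zero/short length would loop forever */
                    if (clen < sizeof(*ch) || offset + clen > len)
                            break;
                    printf("chunk type=%u flags=%#x len=%u at %zu\n",
                           ch->type, ch->flags, clen, offset);
                    offset += (clen + 3U) & ~3U;    /* WORD_ROUND() */
            }
    }

    int main(void)
    {
            uint8_t buf[8];
            struct sctp_chunkhdr_sketch init = { .type = 1, .length = htons(4) };
            struct sctp_chunkhdr_sketch abrt = { .type = 6, .length = htons(4) };

            memcpy(buf, &init, 4);
            memcpy(buf + 4, &abrt, 4);
            walk_chunks(buf, sizeof(buf));   /* prints both chunks */
            return 0;
    }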
|
H A D | nf_conntrack_proto_sctp.c | 203 #define for_each_sctp_chunk(skb, sch, _sch, offset, dataoff, count) \ 206 ((sch) = skb_header_pointer((skb), (offset), sizeof(_sch), &(_sch))); \ 207 (offset) += (ntohs((sch)->length) + 3) & ~3, (count)++) 216 sctp_chunkhdr_t _sch, *sch; do_basic_checks() local 221 for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) { for_each_sctp_chunk() 222 pr_debug("Chunk Num: %d Type: %d\n", count, sch->type); for_each_sctp_chunk() 224 if (sch->type == SCTP_CID_INIT || for_each_sctp_chunk() 225 sch->type == SCTP_CID_INIT_ACK || for_each_sctp_chunk() 226 sch->type == SCTP_CID_SHUTDOWN_COMPLETE) for_each_sctp_chunk() 234 if (((sch->type == SCTP_CID_COOKIE_ACK || for_each_sctp_chunk() 235 sch->type == SCTP_CID_COOKIE_ECHO || for_each_sctp_chunk() 237 count != 0) || !sch->length) { for_each_sctp_chunk() 243 set_bit(sch->type, map); for_each_sctp_chunk() 335 const struct sctp_chunkhdr *sch; sctp_packet() local 362 for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) { for_each_sctp_chunk() 364 if (sch->type == SCTP_CID_INIT) { for_each_sctp_chunk() 368 } else if (sch->type == SCTP_CID_ABORT) { for_each_sctp_chunk() 373 } else if (sch->type == SCTP_CID_SHUTDOWN_COMPLETE) { for_each_sctp_chunk() 377 sch->flags & SCTP_CHUNK_FLAG_T) for_each_sctp_chunk() 379 } else if (sch->type == SCTP_CID_COOKIE_ECHO) { for_each_sctp_chunk() 383 } else if (sch->type == SCTP_CID_HEARTBEAT || for_each_sctp_chunk() 384 sch->type == SCTP_CID_HEARTBEAT_ACK) { for_each_sctp_chunk() 396 new_state = sctp_new_state(dir, old_state, sch->type); for_each_sctp_chunk() 402 dir, sch->type, old_state); for_each_sctp_chunk() 407 if (sch->type == SCTP_CID_INIT || for_each_sctp_chunk() 408 sch->type == SCTP_CID_INIT_ACK) { for_each_sctp_chunk() 451 const struct sctp_chunkhdr *sch; sctp_new() local 471 for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) { for_each_sctp_chunk() 474 SCTP_CONNTRACK_NONE, sch->type); for_each_sctp_chunk() 484 if (sch->type == SCTP_CID_INIT) { for_each_sctp_chunk() 502 } else if (sch->type == SCTP_CID_HEARTBEAT) { for_each_sctp_chunk()
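
sctp_new_state(), called from sctp_packet() above, is a table lookup: direction, a chunk-derived event, and the old connection state index into a transition table. A toy version of that shape is sketched below; the states, events, and table entries are made up for illustration and do not reproduce the kernel's sctp_conntracks table.

    #include <stdio.h>

    enum toy_dir   { DIR_ORIG, DIR_REPLY, DIR_MAX };
    enum toy_state { ST_NONE, ST_WAIT, ST_ESTABLISHED, ST_CLOSED, ST_MAX };
    enum toy_event { EV_INIT, EV_COOKIE_ECHO, EV_ABORT, EV_MAX };

    /* Missing entries default to 0 (ST_NONE), i.e. "invalid transition". */
    static const enum toy_state transitions[DIR_MAX][EV_MAX][ST_MAX] = {
            [DIR_ORIG] = {
                    [EV_INIT]        = { [ST_NONE]        = ST_WAIT,
                                         [ST_CLOSED]      = ST_WAIT },
                    [EV_COOKIE_ECHO] = { [ST_WAIT]        = ST_ESTABLISHED },
                    [EV_ABORT]       = { [ST_WAIT]        = ST_CLOSED,
                                         [ST_ESTABLISHED] = ST_CLOSED },
            },
    };

    static enum toy_state toy_new_state(enum toy_dir dir, enum toy_state old,
                                        enum toy_event ev)
    {
            return transitions[dir][ev][old];
    }

    int main(void)
    {
            printf("NONE + INIT -> %d\n",
                   toy_new_state(DIR_ORIG, ST_NONE, EV_INIT)); /* ST_WAIT */
            return 0;
    }

The table keeps the per-chunk handling in sctp_packet() down to classifying the chunk into an event; everything else is data, which is why invalid transitions are caught with a single comparison.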
|
/linux-4.4.14/include/uapi/linux/netfilter_ipv6/ |
H A D | ip6t_ipv6header.h | 5 * Rewritten by: Andras Kis-Szabo <kisza@sch.bme.hu> */
|
/linux-4.4.14/net/ipv6/netfilter/ |
H A D | ip6t_eui64.c | 3 /* (C) 2001-2002 Andras Kis-Szabo <kisza@sch.bme.hu> 20 MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
|
H A D | ip6t_ipv6header.c | 5 * Rewritten by: Andras Kis-Szabo <kisza@sch.bme.hu> */ 7 /* (C) 2001-2002 Andras Kis-Szabo <kisza@sch.bme.hu> 27 MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
|
H A D | ip6t_ah.c | 3 /* (C) 2001-2002 Andras Kis-Szabo <kisza@sch.bme.hu> 24 MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
|
H A D | ip6t_frag.c | 3 /* (C) 2001-2002 Andras Kis-Szabo <kisza@sch.bme.hu> 23 MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
|
H A D | ip6t_hbh.c | 3 /* (C) 2001-2002 Andras Kis-Szabo <kisza@sch.bme.hu> 25 MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
|
H A D | ip6t_rt.c | 3 /* (C) 2001-2002 Andras Kis-Szabo <kisza@sch.bme.hu> 25 MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
|
/linux-4.4.14/net/netfilter/ipvs/ |
H A D | ip_vs_proto_sctp.c | 18 sctp_chunkhdr_t _schunkh, *sch; sctp_conn_schedule() local 25 sch = skb_header_pointer( sctp_conn_schedule() 28 if (sch && (sch->type == SCTP_CID_INIT || sctp_conn_schedule() 381 sctp_chunkhdr_t _sctpch, *sch; set_sctp_state() local 393 sch = skb_header_pointer(skb, cofs, sizeof(_sctpch), &_sctpch); set_sctp_state() 394 if (sch == NULL) set_sctp_state() 397 chunk_type = sch->type; set_sctp_state() 409 if ((sch->type == SCTP_CID_COOKIE_ECHO) || set_sctp_state() 410 (sch->type == SCTP_CID_COOKIE_ACK)) { set_sctp_state() 411 int clen = ntohs(sch->length); set_sctp_state() 414 sch = skb_header_pointer(skb, cofs + ALIGN(clen, 4), set_sctp_state() 416 if (sch && sch->type == SCTP_CID_ABORT) set_sctp_state() 417 chunk_type = sch->type; set_sctp_state()
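
set_sctp_state() above has one wrinkle: when the first chunk is COOKIE_ECHO or COOKIE_ACK, an ABORT may be bundled immediately behind it, so the code peeks ALIGN(length, 4) bytes further before deciding the state. A hedged sketch of that peek; the TOY_CID_* values follow the RFC 4960 chunk IDs, everything else here is hypothetical.

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define TOY_CID_ABORT        6
    #define TOY_CID_COOKIE_ECHO 10
    #define TOY_CID_COOKIE_ACK  11

    struct toy_chunkhdr {
            uint8_t  type;
            uint8_t  flags;
            uint16_t length;        /* network byte order */
    };

    /* Return the chunk type that should drive the state machine: the
     * first chunk, unless it is COOKIE_ECHO/COOKIE_ACK with an ABORT
     * bundled directly behind it. */
    static int effective_chunk_type(const uint8_t *buf, size_t len)
    {
            struct toy_chunkhdr ch, next;
            size_t clen;

            if (len < sizeof(ch))
                    return -1;
            memcpy(&ch, buf, sizeof(ch));
            if (ch.type != TOY_CID_COOKIE_ECHO && ch.type != TOY_CID_COOKIE_ACK)
                    return ch.type;
            clen = (ntohs(ch.length) + 3U) & ~3U;   /* ALIGN(clen, 4) */
            if (clen < sizeof(ch) || len < clen + sizeof(next))
                    return ch.type;
            memcpy(&next, buf + clen, sizeof(next));
            return next.type == TOY_CID_ABORT ? TOY_CID_ABORT : ch.type;
    }

    int main(void)
    {
            uint8_t buf[8];
            struct toy_chunkhdr cack = { TOY_CID_COOKIE_ACK, 0, htons(4) };
            struct toy_chunkhdr abrt = { TOY_CID_ABORT, 0, htons(4) };

            memcpy(buf, &cack, 4);
            memcpy(buf + 4, &abrt, 4);
            printf("effective type: %d\n",
                   effective_chunk_type(buf, sizeof(buf)));  /* 6 = ABORT */
            return 0;
    }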
|
H A D | ip_vs_core.c | 1036 sctp_chunkhdr_t *sch, schunk; is_sctp_abort() local 1037 sch = skb_header_pointer(skb, nh_len + sizeof(sctp_sctphdr_t), is_sctp_abort() 1039 if (sch == NULL) is_sctp_abort() 1041 if (sch->type == SCTP_CID_ABORT) is_sctp_abort() 1069 sctp_chunkhdr_t *sch, schunk; is_new_conn() local 1071 sch = skb_header_pointer(skb, iph->len + sizeof(sctp_sctphdr_t), is_new_conn() 1073 if (sch == NULL) is_new_conn() 1075 return sch->type == SCTP_CID_INIT; is_new_conn()
|
/linux-4.4.14/arch/mips/include/asm/netlogic/xlp-hal/ |
H A D | pic.h | 229 int sch, int vec, int dt, int db, int cpu) nlm_9xx_pic_write_irt() 243 int sch, int vec, int dt, int db, int dte) nlm_pic_write_irt() 248 ((sch & 0x1) << 28) | ((vec & 0x3f) << 20) | nlm_pic_write_irt() 257 int sch, int vec, int cpu) nlm_pic_write_irt_direct() 260 nlm_9xx_pic_write_irt(base, irt_num, en, nmi, sch, vec, nlm_pic_write_irt_direct() 263 nlm_pic_write_irt(base, irt_num, en, nmi, sch, vec, 1, nlm_pic_write_irt_direct() 228 nlm_9xx_pic_write_irt(uint64_t base, int irt_num, int en, int nmi, int sch, int vec, int dt, int db, int cpu) nlm_9xx_pic_write_irt() argument 242 nlm_pic_write_irt(uint64_t base, int irt_num, int en, int nmi, int sch, int vec, int dt, int db, int dte) nlm_pic_write_irt() argument 256 nlm_pic_write_irt_direct(uint64_t base, int irt_num, int en, int nmi, int sch, int vec, int cpu) nlm_pic_write_irt_direct() argument
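
nlm_pic_write_irt() above packs its arguments into one interrupt-routing-table word with mask-and-shift; the listing shows sch landing in bit 28 and vec in bits 20-25. The sketch below reproduces just that packing. Only those two shifts are taken from the source; the en/nmi positions are guesses for illustration, not the hardware layout.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pack_irt_sketch(int en, int nmi, int sch, int vec)
    {
            uint32_t w = 0;

            w |= (uint32_t)(en  & 0x1)  << 31;  /* hypothetical position */
            w |= (uint32_t)(nmi & 0x1)  << 29;  /* hypothetical position */
            w |= (uint32_t)(sch & 0x1)  << 28;  /* as in the listing */
            w |= (uint32_t)(vec & 0x3f) << 20;  /* as in the listing */
            return w;
    }

    int main(void)
    {
            /* enabled, no NMI, level-scheduled, vector 0x21 */
            printf("irt word = %#x\n", pack_irt_sketch(1, 0, 1, 0x21));
            return 0;
    }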
|
/linux-4.4.14/drivers/i2c/busses/ |
H A D | i2c-isch.c | 140 * This is the main access entry for i2c-sch access
|
/linux-4.4.14/arch/s390/include/asm/ |
H A D | qdio.h | 289 u16 sch; member in struct:qdio_ssqd_desc
|
/linux-4.4.14/drivers/net/wireless/iwlwifi/pcie/ |
H A D | internal.h | 81 u32 sch; member in struct:isr_statistics
|
H A D | rx.c | 1292 isr_stats->sch++; iwl_pcie_irq_handler()
|
H A D | trans.c | 2025 isr_stats->sch); iwl_dbgfs_interrupt_read()
|
/linux-4.4.14/drivers/net/wireless/iwlegacy/ |
H A D | debug.c | 703 il->isr_stats.sch); il_dbgfs_interrupt_read()
|
H A D | common.h | 1024 u32 sch; member in struct:isr_stats
|
H A D | 3945-mac.c | 1465 il->isr_stats.sch++; il3945_irq_tasklet()
|
H A D | 4965-mac.c | 4428 il->isr_stats.sch++; il4965_irq_tasklet()
|
/linux-4.4.14/drivers/usb/storage/ |
H A D | unusual_devs.h | 1989 /* Reported by Michael Büsch <m@bues.ch> */
|
/linux-4.4.14/arch/mips/include/asm/octeon/ |
H A D | cvmx-mio-defs.h | 2340 uint64_t sch:4; member in struct:cvmx_mio_fus_read_times::cvmx_mio_fus_read_times_s 2350 uint64_t sch:4;
|