sch               229 arch/mips/include/asm/netlogic/xlp-hal/pic.h 	int sch, int vec, int dt, int db, int cpu)
sch               243 arch/mips/include/asm/netlogic/xlp-hal/pic.h 	int sch, int vec, int dt, int db, int dte)
sch               248 arch/mips/include/asm/netlogic/xlp-hal/pic.h 			((sch & 0x1) << 28) | ((vec & 0x3f) << 20) |
sch               257 arch/mips/include/asm/netlogic/xlp-hal/pic.h 	int sch, int vec, int cpu)
sch               260 arch/mips/include/asm/netlogic/xlp-hal/pic.h 		nlm_9xx_pic_write_irt(base, irt_num, en, nmi, sch, vec,
sch               263 arch/mips/include/asm/netlogic/xlp-hal/pic.h 		nlm_pic_write_irt(base, irt_num, en, nmi, sch, vec, 1,
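The XLP9xx path above packs the scheduling bit and the vector into a single IRT word. A minimal stand-alone sketch of just that packing, reproducing the shifts and masks visible in pic.h above (the enable, NMI and destination fields are intentionally omitted):

        #include <stdint.h>

        /* Sketch: only the sch/vec packing shown above; the other IRT
         * fields (en, nmi, dt, db, dte) are left out. */
        static uint32_t irt_pack_sch_vec(int sch, int vec)
        {
                return ((uint32_t)(sch & 0x1) << 28) |
                       ((uint32_t)(vec & 0x3f) << 20);
        }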
sch              2320 arch/mips/include/asm/octeon/cvmx-mio-defs.h 		uint64_t sch:4;
sch              2330 arch/mips/include/asm/octeon/cvmx-mio-defs.h 		uint64_t sch:4;
sch               286 arch/s390/include/asm/qdio.h 	u16 sch;
sch                29 drivers/gpio/gpio-sch.c static unsigned sch_gpio_offset(struct sch_gpio *sch, unsigned gpio,
sch                34 drivers/gpio/gpio-sch.c 	if (gpio >= sch->resume_base) {
sch                35 drivers/gpio/gpio-sch.c 		gpio -= sch->resume_base;
sch                42 drivers/gpio/gpio-sch.c static unsigned sch_gpio_bit(struct sch_gpio *sch, unsigned gpio)
sch                44 drivers/gpio/gpio-sch.c 	if (gpio >= sch->resume_base)
sch                45 drivers/gpio/gpio-sch.c 		gpio -= sch->resume_base;
sch                49 drivers/gpio/gpio-sch.c static int sch_gpio_reg_get(struct sch_gpio *sch, unsigned gpio, unsigned reg)
sch                54 drivers/gpio/gpio-sch.c 	offset = sch_gpio_offset(sch, gpio, reg);
sch                55 drivers/gpio/gpio-sch.c 	bit = sch_gpio_bit(sch, gpio);
sch                57 drivers/gpio/gpio-sch.c 	reg_val = !!(inb(sch->iobase + offset) & BIT(bit));
sch                62 drivers/gpio/gpio-sch.c static void sch_gpio_reg_set(struct sch_gpio *sch, unsigned gpio, unsigned reg,
sch                68 drivers/gpio/gpio-sch.c 	offset = sch_gpio_offset(sch, gpio, reg);
sch                69 drivers/gpio/gpio-sch.c 	bit = sch_gpio_bit(sch, gpio);
sch                71 drivers/gpio/gpio-sch.c 	reg_val = inb(sch->iobase + offset);
sch                74 drivers/gpio/gpio-sch.c 		outb(reg_val | BIT(bit), sch->iobase + offset);
sch                76 drivers/gpio/gpio-sch.c 		outb((reg_val & ~BIT(bit)), sch->iobase + offset);
sch                81 drivers/gpio/gpio-sch.c 	struct sch_gpio *sch = gpiochip_get_data(gc);
sch                83 drivers/gpio/gpio-sch.c 	spin_lock(&sch->lock);
sch                84 drivers/gpio/gpio-sch.c 	sch_gpio_reg_set(sch, gpio_num, GIO, 1);
sch                85 drivers/gpio/gpio-sch.c 	spin_unlock(&sch->lock);
sch                91 drivers/gpio/gpio-sch.c 	struct sch_gpio *sch = gpiochip_get_data(gc);
sch                92 drivers/gpio/gpio-sch.c 	return sch_gpio_reg_get(sch, gpio_num, GLV);
sch                97 drivers/gpio/gpio-sch.c 	struct sch_gpio *sch = gpiochip_get_data(gc);
sch                99 drivers/gpio/gpio-sch.c 	spin_lock(&sch->lock);
sch               100 drivers/gpio/gpio-sch.c 	sch_gpio_reg_set(sch, gpio_num, GLV, val);
sch               101 drivers/gpio/gpio-sch.c 	spin_unlock(&sch->lock);
sch               107 drivers/gpio/gpio-sch.c 	struct sch_gpio *sch = gpiochip_get_data(gc);
sch               109 drivers/gpio/gpio-sch.c 	spin_lock(&sch->lock);
sch               110 drivers/gpio/gpio-sch.c 	sch_gpio_reg_set(sch, gpio_num, GIO, 0);
sch               111 drivers/gpio/gpio-sch.c 	spin_unlock(&sch->lock);
sch               128 drivers/gpio/gpio-sch.c 	struct sch_gpio *sch = gpiochip_get_data(gc);
sch               130 drivers/gpio/gpio-sch.c 	return sch_gpio_reg_get(sch, gpio_num, GIO);
sch               145 drivers/gpio/gpio-sch.c 	struct sch_gpio *sch;
sch               148 drivers/gpio/gpio-sch.c 	sch = devm_kzalloc(&pdev->dev, sizeof(*sch), GFP_KERNEL);
sch               149 drivers/gpio/gpio-sch.c 	if (!sch)
sch               160 drivers/gpio/gpio-sch.c 	spin_lock_init(&sch->lock);
sch               161 drivers/gpio/gpio-sch.c 	sch->iobase = res->start;
sch               162 drivers/gpio/gpio-sch.c 	sch->chip = sch_gpio_chip;
sch               163 drivers/gpio/gpio-sch.c 	sch->chip.label = dev_name(&pdev->dev);
sch               164 drivers/gpio/gpio-sch.c 	sch->chip.parent = &pdev->dev;
sch               168 drivers/gpio/gpio-sch.c 		sch->resume_base = 10;
sch               169 drivers/gpio/gpio-sch.c 		sch->chip.ngpio = 14;
sch               176 drivers/gpio/gpio-sch.c 		sch_gpio_reg_set(sch, 8, GEN, 1);
sch               177 drivers/gpio/gpio-sch.c 		sch_gpio_reg_set(sch, 9, GEN, 1);
sch               182 drivers/gpio/gpio-sch.c 		sch_gpio_reg_set(sch, 13, GEN, 1);
sch               186 drivers/gpio/gpio-sch.c 		sch->resume_base = 5;
sch               187 drivers/gpio/gpio-sch.c 		sch->chip.ngpio = 14;
sch               191 drivers/gpio/gpio-sch.c 		sch->resume_base = 21;
sch               192 drivers/gpio/gpio-sch.c 		sch->chip.ngpio = 30;
sch               196 drivers/gpio/gpio-sch.c 		sch->resume_base = 2;
sch               197 drivers/gpio/gpio-sch.c 		sch->chip.ngpio = 8;
sch               204 drivers/gpio/gpio-sch.c 	platform_set_drvdata(pdev, sch);
sch               206 drivers/gpio/gpio-sch.c 	return devm_gpiochip_add_data(&pdev->dev, &sch->chip, sch);
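The gpio-sch.c helpers above are a plain read-modify-write over legacy I/O ports, one BIT() per GPIO line. A self-contained sketch of the same shape, with an emulated register byte standing in for inb()/outb() on sch->iobase (the fake_reg helpers are illustrative, not part of the driver):

        #include <stdint.h>
        #include <stdio.h>

        #define BIT(n) (1U << (n))

        static uint8_t fake_reg;                      /* stands in for the I/O port */
        static uint8_t reg_read(void)       { return fake_reg; }
        static void    reg_write(uint8_t v) { fake_reg = v; }

        /* Same pattern as sch_gpio_reg_set()/sch_gpio_reg_get() above */
        static void gpio_bit_set(unsigned bit, int val)
        {
                uint8_t v = reg_read();
                reg_write(val ? (v | BIT(bit)) : (v & (uint8_t)~BIT(bit)));
        }

        static int gpio_bit_get(unsigned bit)
        {
                return !!(reg_read() & BIT(bit));
        }

        int main(void)
        {
                gpio_bit_set(3, 1);
                printf("%d\n", gpio_bit_get(3));      /* 1 */
                gpio_bit_set(3, 0);
                printf("%d\n", gpio_bit_get(3));      /* 0 */
                return 0;
        }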
sch              1443 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			il->isr_stats.sch++;
sch              4412 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			il->isr_stats.sch++;
sch              1010 drivers/net/wireless/intel/iwlegacy/common.h 	u32 sch;
sch               672 drivers/net/wireless/intel/iwlegacy/debug.c 		      il->isr_stats.sch);
sch               126 drivers/net/wireless/intel/iwlwifi/pcie/internal.h 	u32 sch;
sch              1843 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		isr_stats->sch++;
sch              2667 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 		isr_stats->sch);
sch                78 drivers/s390/cio/ccwreq.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch                90 drivers/s390/cio/ccwreq.c 		rc = cio_start(sch, cp, (u8) req->mask);
sch               106 drivers/s390/cio/ccwreq.c 		rc = cio_clear(sch);
sch               153 drivers/s390/cio/ccwreq.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch               160 drivers/s390/cio/ccwreq.c 	rc = cio_clear(sch);
sch               329 drivers/s390/cio/ccwreq.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch               333 drivers/s390/cio/ccwreq.c 	if (cio_update_schib(sch))
sch               337 drivers/s390/cio/ccwreq.c 		if ((0x80 >> chp) & sch->schib.pmcw.lpum)
sch               340 drivers/s390/cio/ccwreq.c 				scsw_cstat(&sch->schib.scsw),
sch               341 drivers/s390/cio/ccwreq.c 				scsw_dstat(&sch->schib.scsw),
sch               342 drivers/s390/cio/ccwreq.c 				sch->schid.cssid,
sch               343 drivers/s390/cio/ccwreq.c 				sch->schib.pmcw.chpid[chp]);
sch               350 drivers/s390/cio/ccwreq.c 	rc = cio_clear(sch);
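Several ccwreq.c lines above recover the subchannel from the ccw device's parent with to_subchannel(). That helper is the usual container_of() idiom; here is a stand-alone illustration with simplified stand-in types (these are not the real s390 structures):

        #include <stddef.h>
        #include <stdio.h>

        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))

        struct device     { int id; };
        struct subchannel { int schid; struct device dev; };

        /* Recover the enclosing subchannel from its embedded device */
        static struct subchannel *to_subchannel(struct device *dev)
        {
                return container_of(dev, struct subchannel, dev);
        }

        int main(void)
        {
                struct subchannel sch = { .schid = 42 };
                printf("%d\n", to_subchannel(&sch.dev)->schid);  /* 42 */
                return 0;
        }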
sch                75 drivers/s390/cio/chp.c u8 chp_get_sch_opm(struct subchannel *sch)
sch                85 drivers/s390/cio/chp.c 		chpid.id = sch->schib.pmcw.chpid[i];
sch                63 drivers/s390/cio/chp.h u8 chp_get_sch_opm(struct subchannel *sch);
sch                92 drivers/s390/cio/chsc.c 	u16 sch;	  /* subchannel */
sch               213 drivers/s390/cio/chsc.c static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
sch               215 drivers/s390/cio/chsc.c 	spin_lock_irq(sch->lock);
sch               216 drivers/s390/cio/chsc.c 	if (sch->driver && sch->driver->chp_event)
sch               217 drivers/s390/cio/chsc.c 		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
sch               219 drivers/s390/cio/chsc.c 	spin_unlock_irq(sch->lock);
sch               223 drivers/s390/cio/chsc.c 	sch->lpm = 0;
sch               224 drivers/s390/cio/chsc.c 	spin_unlock_irq(sch->lock);
sch               225 drivers/s390/cio/chsc.c 	css_schedule_eval(sch->schid);
sch               252 drivers/s390/cio/chsc.c static int __s390_process_res_acc(struct subchannel *sch, void *data)
sch               254 drivers/s390/cio/chsc.c 	spin_lock_irq(sch->lock);
sch               255 drivers/s390/cio/chsc.c 	if (sch->driver && sch->driver->chp_event)
sch               256 drivers/s390/cio/chsc.c 		sch->driver->chp_event(sch, data, CHP_ONLINE);
sch               257 drivers/s390/cio/chsc.c 	spin_unlock_irq(sch->lock);
sch               716 drivers/s390/cio/chsc.c static void __s390_subchannel_vary_chpid(struct subchannel *sch,
sch               724 drivers/s390/cio/chsc.c 	spin_lock_irqsave(sch->lock, flags);
sch               725 drivers/s390/cio/chsc.c 	if (sch->driver && sch->driver->chp_event)
sch               726 drivers/s390/cio/chsc.c 		sch->driver->chp_event(sch, &link,
sch               728 drivers/s390/cio/chsc.c 	spin_unlock_irqrestore(sch->lock, flags);
sch               731 drivers/s390/cio/chsc.c static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
sch               735 drivers/s390/cio/chsc.c 	__s390_subchannel_vary_chpid(sch, *chpid, 0);
sch               739 drivers/s390/cio/chsc.c static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
sch               743 drivers/s390/cio/chsc.c 	__s390_subchannel_vary_chpid(sch, *chpid, 1);
sch              1358 drivers/s390/cio/chsc.c 	brinfo_area->sch   = schid.sch_no;
sch               229 drivers/s390/cio/chsc.h 	u16 sch;
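The chsc.c fragments above repeat one dispatch shape: take the subchannel lock, call the driver's optional chp_event hook if one is registered, unlock. A rough user-space analogue with a pthread mutex (illustrative names only, not the kernel API):

        #include <pthread.h>

        struct drv { void (*chp_event)(void *obj, void *data, int event); };
        struct obj {
                pthread_mutex_t lock;
                struct drv *driver;
        };

        /* Guarded optional-callback dispatch, as in __s390_process_res_acc() */
        static void notify_chp_event(struct obj *o, void *data, int event)
        {
                pthread_mutex_lock(&o->lock);
                if (o->driver && o->driver->chp_event)
                        o->driver->chp_event(o, data, event);
                pthread_mutex_unlock(&o->lock);
        }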
sch                53 drivers/s390/cio/chsc_sch.c static void chsc_subchannel_irq(struct subchannel *sch)
sch                55 drivers/s390/cio/chsc_sch.c 	struct chsc_private *private = dev_get_drvdata(&sch->dev);
sch                66 drivers/s390/cio/chsc_sch.c 			 sch->schid.ssid, sch->schid.sch_no);
sch                71 drivers/s390/cio/chsc_sch.c 	cio_update_schib(sch);
sch                73 drivers/s390/cio/chsc_sch.c 	put_device(&sch->dev);
sch                76 drivers/s390/cio/chsc_sch.c static int chsc_subchannel_probe(struct subchannel *sch)
sch                82 drivers/s390/cio/chsc_sch.c 		 sch->schid.ssid, sch->schid.sch_no);
sch                83 drivers/s390/cio/chsc_sch.c 	sch->isc = CHSC_SCH_ISC;
sch                87 drivers/s390/cio/chsc_sch.c 	dev_set_drvdata(&sch->dev, private);
sch                88 drivers/s390/cio/chsc_sch.c 	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
sch                91 drivers/s390/cio/chsc_sch.c 			 sch->schid.ssid, sch->schid.sch_no, ret);
sch                92 drivers/s390/cio/chsc_sch.c 		dev_set_drvdata(&sch->dev, NULL);
sch                95 drivers/s390/cio/chsc_sch.c 		if (dev_get_uevent_suppress(&sch->dev)) {
sch                96 drivers/s390/cio/chsc_sch.c 			dev_set_uevent_suppress(&sch->dev, 0);
sch                97 drivers/s390/cio/chsc_sch.c 			kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
sch               103 drivers/s390/cio/chsc_sch.c static int chsc_subchannel_remove(struct subchannel *sch)
sch               107 drivers/s390/cio/chsc_sch.c 	cio_disable_subchannel(sch);
sch               108 drivers/s390/cio/chsc_sch.c 	private = dev_get_drvdata(&sch->dev);
sch               109 drivers/s390/cio/chsc_sch.c 	dev_set_drvdata(&sch->dev, NULL);
sch               112 drivers/s390/cio/chsc_sch.c 		put_device(&sch->dev);
sch               118 drivers/s390/cio/chsc_sch.c static void chsc_subchannel_shutdown(struct subchannel *sch)
sch               120 drivers/s390/cio/chsc_sch.c 	cio_disable_subchannel(sch);
sch               123 drivers/s390/cio/chsc_sch.c static int chsc_subchannel_prepare(struct subchannel *sch)
sch               132 drivers/s390/cio/chsc_sch.c 	cc = stsch(sch->schid, &schib);
sch               138 drivers/s390/cio/chsc_sch.c static int chsc_subchannel_freeze(struct subchannel *sch)
sch               140 drivers/s390/cio/chsc_sch.c 	return cio_disable_subchannel(sch);
sch               143 drivers/s390/cio/chsc_sch.c static int chsc_subchannel_restore(struct subchannel *sch)
sch               145 drivers/s390/cio/chsc_sch.c 	return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
sch               208 drivers/s390/cio/chsc_sch.c 	struct subchannel *sch = to_subchannel(dev);
sch               210 drivers/s390/cio/chsc_sch.c 	return sch->schib.pmcw.ena && !scsw_fctl(&sch->schib.scsw);
sch               213 drivers/s390/cio/chsc_sch.c static struct subchannel *chsc_get_next_subchannel(struct subchannel *sch)
sch               218 drivers/s390/cio/chsc_sch.c 				 sch ? &sch->dev : NULL, NULL,
sch               242 drivers/s390/cio/chsc_sch.c 	struct subchannel *sch = NULL;
sch               247 drivers/s390/cio/chsc_sch.c 	while ((sch = chsc_get_next_subchannel(sch))) {
sch               248 drivers/s390/cio/chsc_sch.c 		spin_lock(sch->lock);
sch               249 drivers/s390/cio/chsc_sch.c 		private = dev_get_drvdata(&sch->dev);
sch               251 drivers/s390/cio/chsc_sch.c 			spin_unlock(sch->lock);
sch               255 drivers/s390/cio/chsc_sch.c 		chsc_area->header.sid = sch->schid;
sch               257 drivers/s390/cio/chsc_sch.c 		CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid));
sch               266 drivers/s390/cio/chsc_sch.c 			sch->schib.scsw.cmd.fctl |= SCSW_FCTL_START_FUNC;
sch               276 drivers/s390/cio/chsc_sch.c 		spin_unlock(sch->lock);
sch               278 drivers/s390/cio/chsc_sch.c 			 sch->schid.ssid, sch->schid.sch_no, cc);
sch               281 drivers/s390/cio/chsc_sch.c 		put_device(&sch->dev);
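chsc_subchannel_probe() above follows the common probe shape: allocate private state, attach it with dev_set_drvdata(), enable the subchannel using the subchannel pointer itself as the interruption parameter, and detach again on failure. A condensed paraphrase (allocation assumed to be kzalloc(); logging and uevent handling dropped):

        static int chsc_probe_sketch(struct subchannel *sch)
        {
                struct chsc_private *private;
                int ret;

                private = kzalloc(sizeof(*private), GFP_KERNEL);
                if (!private)
                        return -ENOMEM;
                sch->isc = CHSC_SCH_ISC;
                dev_set_drvdata(&sch->dev, private);
                ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
                if (ret) {
                        dev_set_drvdata(&sch->dev, NULL);
                        kfree(private);
                }
                return ret;
        }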
sch                88 drivers/s390/cio/cio.c int cio_set_options(struct subchannel *sch, int flags)
sch                90 drivers/s390/cio/cio.c 	struct io_subchannel_private *priv = to_io_private(sch);
sch                99 drivers/s390/cio/cio.c cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
sch               104 drivers/s390/cio/cio.c 		sch->lpm &= ~lpm;
sch               106 drivers/s390/cio/cio.c 		sch->lpm = 0;
sch               109 drivers/s390/cio/cio.c 		      "subchannel 0.%x.%04x!\n", sch->schid.ssid,
sch               110 drivers/s390/cio/cio.c 		      sch->schid.sch_no);
sch               112 drivers/s390/cio/cio.c 	if (cio_update_schib(sch))
sch               115 drivers/s390/cio/cio.c 	sprintf(dbf_text, "no%s", dev_name(&sch->dev));
sch               117 drivers/s390/cio/cio.c 	CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib));
sch               119 drivers/s390/cio/cio.c 	return (sch->lpm ? -EACCES : -ENODEV);
sch               123 drivers/s390/cio/cio.c cio_start_key (struct subchannel *sch,	/* subchannel structure */
sch               128 drivers/s390/cio/cio.c 	struct io_subchannel_private *priv = to_io_private(sch);
sch               133 drivers/s390/cio/cio.c 	CIO_TRACE_EVENT(5, dev_name(&sch->dev));
sch               137 drivers/s390/cio/cio.c 	orb->cmd.intparm = (u32)(addr_t)sch;
sch               143 drivers/s390/cio/cio.c 	orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm;
sch               152 drivers/s390/cio/cio.c 	ccode = ssch(sch->schid, orb);
sch               162 drivers/s390/cio/cio.c 		sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
sch               168 drivers/s390/cio/cio.c 		return cio_start_handle_notoper(sch, lpm);
sch               176 drivers/s390/cio/cio.c cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm)
sch               178 drivers/s390/cio/cio.c 	return cio_start_key(sch, cpa, lpm, PAGE_DEFAULT_KEY);
sch               186 drivers/s390/cio/cio.c cio_resume (struct subchannel *sch)
sch               191 drivers/s390/cio/cio.c 	CIO_TRACE_EVENT(4, dev_name(&sch->dev));
sch               193 drivers/s390/cio/cio.c 	ccode = rsch (sch->schid);
sch               199 drivers/s390/cio/cio.c 		sch->schib.scsw.cmd.actl |= SCSW_ACTL_RESUME_PEND;
sch               219 drivers/s390/cio/cio.c cio_halt(struct subchannel *sch)
sch               223 drivers/s390/cio/cio.c 	if (!sch)
sch               227 drivers/s390/cio/cio.c 	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
sch               232 drivers/s390/cio/cio.c 	ccode = hsch (sch->schid);
sch               238 drivers/s390/cio/cio.c 		sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
sch               253 drivers/s390/cio/cio.c cio_clear(struct subchannel *sch)
sch               257 drivers/s390/cio/cio.c 	if (!sch)
sch               261 drivers/s390/cio/cio.c 	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
sch               266 drivers/s390/cio/cio.c 	ccode = csch (sch->schid);
sch               272 drivers/s390/cio/cio.c 		sch->schib.scsw.cmd.actl |= SCSW_ACTL_CLEAR_PEND;
sch               288 drivers/s390/cio/cio.c cio_cancel (struct subchannel *sch)
sch               292 drivers/s390/cio/cio.c 	if (!sch)
sch               296 drivers/s390/cio/cio.c 	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
sch               298 drivers/s390/cio/cio.c 	ccode = xsch (sch->schid);
sch               305 drivers/s390/cio/cio.c 		if (cio_update_schib(sch))
sch               334 drivers/s390/cio/cio.c int cio_cancel_halt_clear(struct subchannel *sch, int *iretry)
sch               338 drivers/s390/cio/cio.c 	if (cio_update_schib(sch))
sch               340 drivers/s390/cio/cio.c 	if (!sch->schib.pmcw.ena)
sch               344 drivers/s390/cio/cio.c 	if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_HALT_PEND) &&
sch               345 drivers/s390/cio/cio.c 	    !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
sch               346 drivers/s390/cio/cio.c 		if (!scsw_is_tm(&sch->schib.scsw)) {
sch               347 drivers/s390/cio/cio.c 			ret = cio_cancel(sch);
sch               358 drivers/s390/cio/cio.c 	if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
sch               361 drivers/s390/cio/cio.c 			ret = cio_halt(sch);
sch               371 drivers/s390/cio/cio.c 		ret = cio_clear(sch);
sch               379 drivers/s390/cio/cio.c static void cio_apply_config(struct subchannel *sch, struct schib *schib)
sch               381 drivers/s390/cio/cio.c 	schib->pmcw.intparm = sch->config.intparm;
sch               382 drivers/s390/cio/cio.c 	schib->pmcw.mbi = sch->config.mbi;
sch               383 drivers/s390/cio/cio.c 	schib->pmcw.isc = sch->config.isc;
sch               384 drivers/s390/cio/cio.c 	schib->pmcw.ena = sch->config.ena;
sch               385 drivers/s390/cio/cio.c 	schib->pmcw.mme = sch->config.mme;
sch               386 drivers/s390/cio/cio.c 	schib->pmcw.mp = sch->config.mp;
sch               387 drivers/s390/cio/cio.c 	schib->pmcw.csense = sch->config.csense;
sch               388 drivers/s390/cio/cio.c 	schib->pmcw.mbfc = sch->config.mbfc;
sch               389 drivers/s390/cio/cio.c 	if (sch->config.mbfc)
sch               390 drivers/s390/cio/cio.c 		schib->mba = sch->config.mba;
sch               393 drivers/s390/cio/cio.c static int cio_check_config(struct subchannel *sch, struct schib *schib)
sch               395 drivers/s390/cio/cio.c 	return (schib->pmcw.intparm == sch->config.intparm) &&
sch               396 drivers/s390/cio/cio.c 		(schib->pmcw.mbi == sch->config.mbi) &&
sch               397 drivers/s390/cio/cio.c 		(schib->pmcw.isc == sch->config.isc) &&
sch               398 drivers/s390/cio/cio.c 		(schib->pmcw.ena == sch->config.ena) &&
sch               399 drivers/s390/cio/cio.c 		(schib->pmcw.mme == sch->config.mme) &&
sch               400 drivers/s390/cio/cio.c 		(schib->pmcw.mp == sch->config.mp) &&
sch               401 drivers/s390/cio/cio.c 		(schib->pmcw.csense == sch->config.csense) &&
sch               402 drivers/s390/cio/cio.c 		(schib->pmcw.mbfc == sch->config.mbfc) &&
sch               403 drivers/s390/cio/cio.c 		(!sch->config.mbfc || (schib->mba == sch->config.mba));
sch               409 drivers/s390/cio/cio.c int cio_commit_config(struct subchannel *sch)
sch               415 drivers/s390/cio/cio.c 	if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
sch               420 drivers/s390/cio/cio.c 		cio_apply_config(sch, &schib);
sch               421 drivers/s390/cio/cio.c 		ccode = msch(sch->schid, &schib);
sch               426 drivers/s390/cio/cio.c 			if (stsch(sch->schid, &schib) ||
sch               429 drivers/s390/cio/cio.c 			if (cio_check_config(sch, &schib)) {
sch               431 drivers/s390/cio/cio.c 				memcpy(&sch->schib, &schib, sizeof(schib));
sch               438 drivers/s390/cio/cio.c 			if (tsch(sch->schid, &irb))
sch               458 drivers/s390/cio/cio.c int cio_update_schib(struct subchannel *sch)
sch               462 drivers/s390/cio/cio.c 	if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
sch               465 drivers/s390/cio/cio.c 	memcpy(&sch->schib, &schib, sizeof(schib));
sch               475 drivers/s390/cio/cio.c int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
sch               480 drivers/s390/cio/cio.c 	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
sch               482 drivers/s390/cio/cio.c 	if (sch_is_pseudo_sch(sch))
sch               484 drivers/s390/cio/cio.c 	if (cio_update_schib(sch))
sch               487 drivers/s390/cio/cio.c 	sch->config.ena = 1;
sch               488 drivers/s390/cio/cio.c 	sch->config.isc = sch->isc;
sch               489 drivers/s390/cio/cio.c 	sch->config.intparm = intparm;
sch               491 drivers/s390/cio/cio.c 	ret = cio_commit_config(sch);
sch               497 drivers/s390/cio/cio.c 		sch->config.csense = 0;
sch               498 drivers/s390/cio/cio.c 		ret = cio_commit_config(sch);
sch               509 drivers/s390/cio/cio.c int cio_disable_subchannel(struct subchannel *sch)
sch               514 drivers/s390/cio/cio.c 	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
sch               516 drivers/s390/cio/cio.c 	if (sch_is_pseudo_sch(sch))
sch               518 drivers/s390/cio/cio.c 	if (cio_update_schib(sch))
sch               521 drivers/s390/cio/cio.c 	sch->config.ena = 0;
sch               522 drivers/s390/cio/cio.c 	ret = cio_commit_config(sch);
sch               535 drivers/s390/cio/cio.c 	struct subchannel *sch;
sch               542 drivers/s390/cio/cio.c 	sch = (struct subchannel *)(unsigned long) tpi_info->intparm;
sch               543 drivers/s390/cio/cio.c 	if (!sch) {
sch               549 drivers/s390/cio/cio.c 	spin_lock(sch->lock);
sch               553 drivers/s390/cio/cio.c 		memcpy (&sch->schib.scsw, &irb->scsw, sizeof (irb->scsw));
sch               555 drivers/s390/cio/cio.c 		if (sch->driver && sch->driver->irq)
sch               556 drivers/s390/cio/cio.c 			sch->driver->irq(sch);
sch               561 drivers/s390/cio/cio.c 	spin_unlock(sch->lock);
sch               586 drivers/s390/cio/cio.c void cio_tsch(struct subchannel *sch)
sch               593 drivers/s390/cio/cio.c 	if (tsch(sch->schid, irb) != 0)
sch               596 drivers/s390/cio/cio.c 	memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
sch               604 drivers/s390/cio/cio.c 	if (sch->driver && sch->driver->irq)
sch               605 drivers/s390/cio/cio.c 		sch->driver->irq(sch);
sch               651 drivers/s390/cio/cio.c 	struct subchannel *sch;
sch               666 drivers/s390/cio/cio.c 	sch = css_alloc_subchannel(schid, &schib);
sch               667 drivers/s390/cio/cio.c 	if (IS_ERR(sch))
sch               668 drivers/s390/cio/cio.c 		return sch;
sch               670 drivers/s390/cio/cio.c 	lockdep_set_class(sch->lock, &console_sch_key);
sch               672 drivers/s390/cio/cio.c 	sch->config.isc = CONSOLE_ISC;
sch               673 drivers/s390/cio/cio.c 	sch->config.intparm = (u32)(addr_t)sch;
sch               674 drivers/s390/cio/cio.c 	ret = cio_commit_config(sch);
sch               677 drivers/s390/cio/cio.c 		put_device(&sch->dev);
sch               680 drivers/s390/cio/cio.c 	console_sch = sch;
sch               681 drivers/s390/cio/cio.c 	return sch;
sch               714 drivers/s390/cio/cio.c int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key)
sch               717 drivers/s390/cio/cio.c 	union orb *orb = &to_io_private(sch)->orb;
sch               720 drivers/s390/cio/cio.c 	orb->tm.intparm = (u32) (addr_t) sch;
sch               723 drivers/s390/cio/cio.c 	orb->tm.lpm = lpm ? lpm : sch->lpm;
sch               725 drivers/s390/cio/cio.c 	cc = ssch(sch->schid, orb);
sch               733 drivers/s390/cio/cio.c 		return cio_start_handle_notoper(sch, lpm);
sch               745 drivers/s390/cio/cio.c int cio_tm_intrg(struct subchannel *sch)
sch               749 drivers/s390/cio/cio.c 	if (!to_io_private(sch)->orb.tm.b)
sch               751 drivers/s390/cio/cio.c 	cc = xsch(sch->schid);
sch               134 drivers/s390/cio/cio.h extern int cio_update_schib(struct subchannel *sch);
sch               135 drivers/s390/cio/cio.h extern int cio_commit_config(struct subchannel *sch);
sch               137 drivers/s390/cio/cio.h int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key);
sch               138 drivers/s390/cio/cio.h int cio_tm_intrg(struct subchannel *sch);
sch               147 drivers/s390/cio/cio.h extern void cio_tsch(struct subchannel *sch);
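cio_commit_config() above is the write-back path for the cached sch->config: overlay it onto a freshly read SCHIB, issue msch(), then re-read and verify that the hardware accepted it. A simplified rendering of that control flow (the real function also distinguishes condition codes 1/2 and drains pending status with tsch()):

        int commit_config_sketch(struct subchannel *sch)
        {
                struct schib schib;
                int retry;

                if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
                        return -ENODEV;
                for (retry = 0; retry < 5; retry++) {
                        cio_apply_config(sch, &schib);
                        if (msch(sch->schid, &schib))
                                break;                  /* simplified error path */
                        if (stsch(sch->schid, &schib))
                                return -ENODEV;
                        if (cio_check_config(sch, &schib)) {
                                /* success: cache what the hardware accepted */
                                memcpy(&sch->schib, &schib, sizeof(schib));
                                return 0;
                        }
                }
                return -EIO;
        }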
sch               178 drivers/s390/cio/cmf.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch               181 drivers/s390/cio/cmf.c 	sch->config.mme = mme;
sch               182 drivers/s390/cio/cmf.c 	sch->config.mbfc = mbfc;
sch               185 drivers/s390/cio/cmf.c 		sch->config.mba = address;
sch               187 drivers/s390/cio/cmf.c 		sch->config.mbi = address;
sch               189 drivers/s390/cio/cmf.c 	ret = cio_commit_config(sch);
sch               271 drivers/s390/cio/cmf.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch               275 drivers/s390/cio/cmf.c 	if (cio_update_schib(sch))
sch               278 drivers/s390/cio/cmf.c 	if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) {
sch               280 drivers/s390/cio/cmf.c 		if ((!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_SUSPENDED)) &&
sch               281 drivers/s390/cio/cmf.c 		    (scsw_actl(&sch->schib.scsw) &
sch               283 drivers/s390/cio/cmf.c 		    (!(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_SEC_STATUS)))
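The cmf.c lines above show how channel measurement is switched on and off: edit the cached subchannel configuration and recommit it via cio_commit_config(). Which address field receives the measurement block depends on the block format (mba for format 1 when mbfc is set, mbi for format 0 otherwise):

        sch->config.mme  = mme;
        sch->config.mbfc = mbfc;
        if (mbfc)
                sch->config.mba = address;  /* format 1: absolute block address */
        else
                sch->config.mbi = address;  /* format 0: measurement-block index */
        ret = cio_commit_config(sch);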
sch                72 drivers/s390/cio/css.c 	struct subchannel *sch = to_subchannel(dev);
sch                77 drivers/s390/cio/css.c 		idset_sch_del(cb->set, sch->schid);
sch                79 drivers/s390/cio/css.c 		rc = cb->fn_known_sch(sch, cb->data);
sch                96 drivers/s390/cio/css.c 	struct subchannel *sch;
sch                99 drivers/s390/cio/css.c 	sch = get_subchannel_by_schid(schid);
sch               100 drivers/s390/cio/css.c 	if (sch) {
sch               102 drivers/s390/cio/css.c 			rc = cb->fn_known_sch(sch, cb->data);
sch               103 drivers/s390/cio/css.c 		put_device(&sch->dev);
sch               152 drivers/s390/cio/css.c static int css_sch_create_locks(struct subchannel *sch)
sch               154 drivers/s390/cio/css.c 	sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
sch               155 drivers/s390/cio/css.c 	if (!sch->lock)
sch               158 drivers/s390/cio/css.c 	spin_lock_init(sch->lock);
sch               159 drivers/s390/cio/css.c 	mutex_init(&sch->reg_mutex);
sch               166 drivers/s390/cio/css.c 	struct subchannel *sch = to_subchannel(dev);
sch               168 drivers/s390/cio/css.c 	sch->config.intparm = 0;
sch               169 drivers/s390/cio/css.c 	cio_commit_config(sch);
sch               170 drivers/s390/cio/css.c 	kfree(sch->driver_override);
sch               171 drivers/s390/cio/css.c 	kfree(sch->lock);
sch               172 drivers/s390/cio/css.c 	kfree(sch);
sch               208 drivers/s390/cio/css.c 	struct subchannel *sch;
sch               215 drivers/s390/cio/css.c 	sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
sch               216 drivers/s390/cio/css.c 	if (!sch)
sch               219 drivers/s390/cio/css.c 	sch->schid = schid;
sch               220 drivers/s390/cio/css.c 	sch->schib = *schib;
sch               221 drivers/s390/cio/css.c 	sch->st = schib->pmcw.st;
sch               223 drivers/s390/cio/css.c 	ret = css_sch_create_locks(sch);
sch               227 drivers/s390/cio/css.c 	INIT_WORK(&sch->todo_work, css_sch_todo);
sch               228 drivers/s390/cio/css.c 	sch->dev.release = &css_subchannel_release;
sch               229 drivers/s390/cio/css.c 	device_initialize(&sch->dev);
sch               234 drivers/s390/cio/css.c 	sch->dev.coherent_dma_mask = DMA_BIT_MASK(31);
sch               239 drivers/s390/cio/css.c 	sch->dma_mask = DMA_BIT_MASK(64);
sch               240 drivers/s390/cio/css.c 	sch->dev.dma_mask = &sch->dma_mask;
sch               241 drivers/s390/cio/css.c 	return sch;
sch               244 drivers/s390/cio/css.c 	kfree(sch);
sch               248 drivers/s390/cio/css.c static int css_sch_device_register(struct subchannel *sch)
sch               252 drivers/s390/cio/css.c 	mutex_lock(&sch->reg_mutex);
sch               253 drivers/s390/cio/css.c 	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
sch               254 drivers/s390/cio/css.c 		     sch->schid.sch_no);
sch               255 drivers/s390/cio/css.c 	ret = device_add(&sch->dev);
sch               256 drivers/s390/cio/css.c 	mutex_unlock(&sch->reg_mutex);
sch               264 drivers/s390/cio/css.c void css_sch_device_unregister(struct subchannel *sch)
sch               266 drivers/s390/cio/css.c 	mutex_lock(&sch->reg_mutex);
sch               267 drivers/s390/cio/css.c 	if (device_is_registered(&sch->dev))
sch               268 drivers/s390/cio/css.c 		device_unregister(&sch->dev);
sch               269 drivers/s390/cio/css.c 	mutex_unlock(&sch->reg_mutex);
sch               301 drivers/s390/cio/css.c void css_update_ssd_info(struct subchannel *sch)
sch               305 drivers/s390/cio/css.c 	ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
sch               307 drivers/s390/cio/css.c 		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
sch               309 drivers/s390/cio/css.c 	ssd_register_chpids(&sch->ssd_info);
sch               315 drivers/s390/cio/css.c 	struct subchannel *sch = to_subchannel(dev);
sch               317 drivers/s390/cio/css.c 	return sprintf(buf, "%01x\n", sch->st);
sch               325 drivers/s390/cio/css.c 	struct subchannel *sch = to_subchannel(dev);
sch               327 drivers/s390/cio/css.c 	return sprintf(buf, "css:t%01X\n", sch->st);
sch               336 drivers/s390/cio/css.c 	struct subchannel *sch = to_subchannel(dev);
sch               352 drivers/s390/cio/css.c 	old = sch->driver_override;
sch               354 drivers/s390/cio/css.c 		sch->driver_override = driver_override;
sch               357 drivers/s390/cio/css.c 		sch->driver_override = NULL;
sch               369 drivers/s390/cio/css.c 	struct subchannel *sch = to_subchannel(dev);
sch               373 drivers/s390/cio/css.c 	len = snprintf(buf, PAGE_SIZE, "%s\n", sch->driver_override);
sch               399 drivers/s390/cio/css.c 	struct subchannel *sch = to_subchannel(dev);
sch               400 drivers/s390/cio/css.c 	struct chsc_ssd_info *ssd = &sch->ssd_info;
sch               421 drivers/s390/cio/css.c 	struct subchannel *sch = to_subchannel(dev);
sch               422 drivers/s390/cio/css.c 	struct pmcw *pmcw = &sch->schib.pmcw;
sch               440 drivers/s390/cio/css.c int css_register_subchannel(struct subchannel *sch)
sch               445 drivers/s390/cio/css.c 	sch->dev.parent = &channel_subsystems[0]->device;
sch               446 drivers/s390/cio/css.c 	sch->dev.bus = &css_bus_type;
sch               447 drivers/s390/cio/css.c 	sch->dev.groups = default_subch_attr_groups;
sch               449 drivers/s390/cio/css.c 	if (sch->st == SUBCHANNEL_TYPE_IO)
sch               450 drivers/s390/cio/css.c 		sch->dev.type = &io_subchannel_type;
sch               461 drivers/s390/cio/css.c 	dev_set_uevent_suppress(&sch->dev, 1);
sch               462 drivers/s390/cio/css.c 	css_update_ssd_info(sch);
sch               464 drivers/s390/cio/css.c 	ret = css_sch_device_register(sch);
sch               467 drivers/s390/cio/css.c 			      sch->schid.ssid, sch->schid.sch_no, ret);
sch               470 drivers/s390/cio/css.c 	if (!sch->driver) {
sch               476 drivers/s390/cio/css.c 		dev_set_uevent_suppress(&sch->dev, 0);
sch               477 drivers/s390/cio/css.c 		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
sch               484 drivers/s390/cio/css.c 	struct subchannel *sch;
sch               487 drivers/s390/cio/css.c 	sch = css_alloc_subchannel(schid, schib);
sch               488 drivers/s390/cio/css.c 	if (IS_ERR(sch))
sch               489 drivers/s390/cio/css.c 		return PTR_ERR(sch);
sch               491 drivers/s390/cio/css.c 	ret = css_register_subchannel(sch);
sch               493 drivers/s390/cio/css.c 		put_device(&sch->dev);
sch               501 drivers/s390/cio/css.c 	struct subchannel *sch;
sch               504 drivers/s390/cio/css.c 	sch = to_subchannel(dev);
sch               505 drivers/s390/cio/css.c 	return schid_equal(&sch->schid, schid);
sch               555 drivers/s390/cio/css.c static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
sch               559 drivers/s390/cio/css.c 	if (sch->driver) {
sch               560 drivers/s390/cio/css.c 		if (sch->driver->sch_event)
sch               561 drivers/s390/cio/css.c 			ret = sch->driver->sch_event(sch, slow);
sch               563 drivers/s390/cio/css.c 			dev_dbg(&sch->dev,
sch               569 drivers/s390/cio/css.c 			      sch->schid.ssid, sch->schid.sch_no, ret);
sch               576 drivers/s390/cio/css.c 	struct subchannel *sch;
sch               579 drivers/s390/cio/css.c 	sch = get_subchannel_by_schid(schid);
sch               580 drivers/s390/cio/css.c 	if (sch) {
sch               581 drivers/s390/cio/css.c 		ret = css_evaluate_known_subchannel(sch, slow);
sch               582 drivers/s390/cio/css.c 		put_device(&sch->dev);
sch               598 drivers/s390/cio/css.c void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
sch               601 drivers/s390/cio/css.c 		      sch->schid.ssid, sch->schid.sch_no, todo);
sch               602 drivers/s390/cio/css.c 	if (sch->todo >= todo)
sch               605 drivers/s390/cio/css.c 	if (!get_device(&sch->dev))
sch               607 drivers/s390/cio/css.c 	sch->todo = todo;
sch               608 drivers/s390/cio/css.c 	if (!queue_work(cio_work_q, &sch->todo_work)) {
sch               610 drivers/s390/cio/css.c 		put_device(&sch->dev);
sch               617 drivers/s390/cio/css.c 	struct subchannel *sch;
sch               621 drivers/s390/cio/css.c 	sch = container_of(work, struct subchannel, todo_work);
sch               623 drivers/s390/cio/css.c 	spin_lock_irq(sch->lock);
sch               624 drivers/s390/cio/css.c 	todo = sch->todo;
sch               625 drivers/s390/cio/css.c 	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
sch               626 drivers/s390/cio/css.c 		      sch->schid.sch_no, todo);
sch               627 drivers/s390/cio/css.c 	sch->todo = SCH_TODO_NOTHING;
sch               628 drivers/s390/cio/css.c 	spin_unlock_irq(sch->lock);
sch               634 drivers/s390/cio/css.c 		ret = css_evaluate_known_subchannel(sch, 1);
sch               636 drivers/s390/cio/css.c 			spin_lock_irq(sch->lock);
sch               637 drivers/s390/cio/css.c 			css_sched_sch_todo(sch, todo);
sch               638 drivers/s390/cio/css.c 			spin_unlock_irq(sch->lock);
sch               642 drivers/s390/cio/css.c 		css_sch_device_unregister(sch);
sch               646 drivers/s390/cio/css.c 	put_device(&sch->dev);
sch               667 drivers/s390/cio/css.c static int slow_eval_known_fn(struct subchannel *sch, void *data)
sch               673 drivers/s390/cio/css.c 	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
sch               674 drivers/s390/cio/css.c 	idset_sch_del(slow_subchannel_set, sch->schid);
sch               677 drivers/s390/cio/css.c 		rc = css_evaluate_known_subchannel(sch, 1);
sch               679 drivers/s390/cio/css.c 			css_schedule_eval(sch->schid);
sch               761 drivers/s390/cio/css.c 	struct subchannel *sch = to_subchannel(dev);
sch               763 drivers/s390/cio/css.c 	idset_sch_del(set, sch->schid);
sch               809 drivers/s390/cio/css.c 	struct subchannel *sch;
sch               830 drivers/s390/cio/css.c 		sch = get_subchannel_by_schid(mchk_schid);
sch               831 drivers/s390/cio/css.c 		if (sch) {
sch               832 drivers/s390/cio/css.c 			css_update_ssd_info(sch);
sch               833 drivers/s390/cio/css.c 			put_device(&sch->dev);
sch              1394 drivers/s390/cio/css.c int sch_is_pseudo_sch(struct subchannel *sch)
sch              1396 drivers/s390/cio/css.c 	if (!sch->dev.parent)
sch              1398 drivers/s390/cio/css.c 	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
sch              1403 drivers/s390/cio/css.c 	struct subchannel *sch = to_subchannel(dev);
sch              1408 drivers/s390/cio/css.c 	if (sch->driver_override && strcmp(sch->driver_override, drv->name))
sch              1412 drivers/s390/cio/css.c 		if (sch->st == id->type)
sch              1421 drivers/s390/cio/css.c 	struct subchannel *sch;
sch              1424 drivers/s390/cio/css.c 	sch = to_subchannel(dev);
sch              1425 drivers/s390/cio/css.c 	sch->driver = to_cssdriver(dev->driver);
sch              1426 drivers/s390/cio/css.c 	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
sch              1428 drivers/s390/cio/css.c 		sch->driver = NULL;
sch              1434 drivers/s390/cio/css.c 	struct subchannel *sch;
sch              1437 drivers/s390/cio/css.c 	sch = to_subchannel(dev);
sch              1438 drivers/s390/cio/css.c 	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
sch              1439 drivers/s390/cio/css.c 	sch->driver = NULL;
sch              1445 drivers/s390/cio/css.c 	struct subchannel *sch;
sch              1447 drivers/s390/cio/css.c 	sch = to_subchannel(dev);
sch              1448 drivers/s390/cio/css.c 	if (sch->driver && sch->driver->shutdown)
sch              1449 drivers/s390/cio/css.c 		sch->driver->shutdown(sch);
sch              1454 drivers/s390/cio/css.c 	struct subchannel *sch = to_subchannel(dev);
sch              1457 drivers/s390/cio/css.c 	ret = add_uevent_var(env, "ST=%01X", sch->st);
sch              1460 drivers/s390/cio/css.c 	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
sch              1466 drivers/s390/cio/css.c 	struct subchannel *sch = to_subchannel(dev);
sch              1469 drivers/s390/cio/css.c 	if (mutex_is_locked(&sch->reg_mutex))
sch              1471 drivers/s390/cio/css.c 	if (!sch->dev.driver)
sch              1473 drivers/s390/cio/css.c 	drv = to_cssdriver(sch->dev.driver);
sch              1475 drivers/s390/cio/css.c 	return drv->prepare ? drv->prepare(sch) : 0;
sch              1480 drivers/s390/cio/css.c 	struct subchannel *sch = to_subchannel(dev);
sch              1483 drivers/s390/cio/css.c 	if (!sch->dev.driver)
sch              1485 drivers/s390/cio/css.c 	drv = to_cssdriver(sch->dev.driver);
sch              1487 drivers/s390/cio/css.c 		drv->complete(sch);
sch              1492 drivers/s390/cio/css.c 	struct subchannel *sch = to_subchannel(dev);
sch              1495 drivers/s390/cio/css.c 	if (!sch->dev.driver)
sch              1497 drivers/s390/cio/css.c 	drv = to_cssdriver(sch->dev.driver);
sch              1498 drivers/s390/cio/css.c 	return drv->freeze ? drv->freeze(sch) : 0;
sch              1503 drivers/s390/cio/css.c 	struct subchannel *sch = to_subchannel(dev);
sch              1506 drivers/s390/cio/css.c 	if (!sch->dev.driver)
sch              1508 drivers/s390/cio/css.c 	drv = to_cssdriver(sch->dev.driver);
sch              1509 drivers/s390/cio/css.c 	return drv->thaw ? drv->thaw(sch) : 0;
sch              1514 drivers/s390/cio/css.c 	struct subchannel *sch = to_subchannel(dev);
sch              1517 drivers/s390/cio/css.c 	css_update_ssd_info(sch);
sch              1518 drivers/s390/cio/css.c 	if (!sch->dev.driver)
sch              1520 drivers/s390/cio/css.c 	drv = to_cssdriver(sch->dev.driver);
sch              1521 drivers/s390/cio/css.c 	return drv->restore ? drv->restore(sch) : 0;
sch               115 drivers/s390/cio/css.h void css_update_ssd_info(struct subchannel *sch);
sch               156 drivers/s390/cio/css.h void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo);
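css_sched_sch_todo() above combines three guards worth calling out: deduplication against an equal-or-higher pending todo, a get_device() reference that keeps the subchannel alive for the deferred work, and an immediate put_device() when queue_work() reports the item was already queued. The core logic, with comments added:

        if (sch->todo >= todo)
                return;                 /* something at least as urgent is pending */
        if (!get_device(&sch->dev))
                return;                 /* device is already going away */
        sch->todo = todo;
        if (!queue_work(cio_work_q, &sch->todo_work))
                put_device(&sch->dev);  /* already queued: drop the extra reference */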
sch               152 drivers/s390/cio/device.c static int io_subchannel_prepare(struct subchannel *sch)
sch               159 drivers/s390/cio/device.c 	cdev = sch_get_cdev(sch);
sch               287 drivers/s390/cio/device.c 	struct subchannel *sch;
sch               301 drivers/s390/cio/device.c 	sch = to_subchannel(cdev->dev.parent);
sch               322 drivers/s390/cio/device.c 		io_subchannel_quiesce(sch);
sch               554 drivers/s390/cio/device.c 	struct subchannel *sch;
sch               564 drivers/s390/cio/device.c 		sch = to_subchannel(dev->parent);
sch               565 drivers/s390/cio/device.c 		if (!sch->lpm)
sch               579 drivers/s390/cio/device.c 	struct subchannel *sch = to_subchannel(dev);
sch               582 drivers/s390/cio/device.c 	rc = chsc_siosl(sch->schid);
sch               585 drivers/s390/cio/device.c 			sch->schid.ssid, sch->schid.sch_no, rc);
sch               589 drivers/s390/cio/device.c 		  sch->schid.ssid, sch->schid.sch_no);
sch               596 drivers/s390/cio/device.c 	struct subchannel *sch = to_subchannel(dev);
sch               598 drivers/s390/cio/device.c 	return sprintf(buf, "%02x\n", sch->vpm);
sch               700 drivers/s390/cio/device.c static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
sch               712 drivers/s390/cio/device.c 	cdev->dev.coherent_dma_mask = sch->dev.coherent_dma_mask;
sch               713 drivers/s390/cio/device.c 	cdev->dev.dma_mask = sch->dev.dma_mask;
sch               735 drivers/s390/cio/device.c static int io_subchannel_initialize_dev(struct subchannel *sch,
sch               744 drivers/s390/cio/device.c 	priv->dev_id.devno = sch->schib.pmcw.dev;
sch               745 drivers/s390/cio/device.c 	priv->dev_id.ssid = sch->schid.ssid;
sch               753 drivers/s390/cio/device.c 	cdev->ccwlock = sch->lock;
sch               754 drivers/s390/cio/device.c 	cdev->dev.parent = &sch->dev;
sch               763 drivers/s390/cio/device.c 	if (!get_device(&sch->dev)) {
sch               768 drivers/s390/cio/device.c 	spin_lock_irq(sch->lock);
sch               769 drivers/s390/cio/device.c 	sch_set_cdev(sch, cdev);
sch               770 drivers/s390/cio/device.c 	spin_unlock_irq(sch->lock);
sch               779 drivers/s390/cio/device.c static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
sch               784 drivers/s390/cio/device.c 	cdev = io_subchannel_allocate_dev(sch);
sch               786 drivers/s390/cio/device.c 		ret = io_subchannel_initialize_dev(sch, cdev);
sch               795 drivers/s390/cio/device.c static void sch_create_and_recog_new_device(struct subchannel *sch)
sch               800 drivers/s390/cio/device.c 	cdev = io_subchannel_create_ccwdev(sch);
sch               803 drivers/s390/cio/device.c 		css_sch_device_unregister(sch);
sch               807 drivers/s390/cio/device.c 	io_subchannel_recog(cdev, sch);
sch               815 drivers/s390/cio/device.c 	struct subchannel *sch;
sch               819 drivers/s390/cio/device.c 	sch = to_subchannel(cdev->dev.parent);
sch               826 drivers/s390/cio/device.c 	if (!device_is_registered(&sch->dev))
sch               828 drivers/s390/cio/device.c 	css_update_ssd_info(sch);
sch               852 drivers/s390/cio/device.c 	if (dev_get_uevent_suppress(&sch->dev)) {
sch               853 drivers/s390/cio/device.c 		dev_set_uevent_suppress(&sch->dev, 0);
sch               854 drivers/s390/cio/device.c 		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
sch               862 drivers/s390/cio/device.c 		spin_lock_irqsave(sch->lock, flags);
sch               863 drivers/s390/cio/device.c 		sch_set_cdev(sch, NULL);
sch               864 drivers/s390/cio/device.c 		spin_unlock_irqrestore(sch->lock, flags);
sch               879 drivers/s390/cio/device.c 	struct subchannel *sch;
sch               884 drivers/s390/cio/device.c 	sch = to_subchannel(cdev->dev.parent);
sch               885 drivers/s390/cio/device.c 	css_sch_device_unregister(sch);
sch               887 drivers/s390/cio/device.c 	put_device(&sch->dev);
sch               920 drivers/s390/cio/device.c static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
sch               926 drivers/s390/cio/device.c 	spin_lock_irq(sch->lock);
sch               928 drivers/s390/cio/device.c 	spin_unlock_irq(sch->lock);
sch               932 drivers/s390/cio/device.c 				  struct subchannel *sch)
sch               939 drivers/s390/cio/device.c 	if (!get_device(&sch->dev))
sch               951 drivers/s390/cio/device.c 			put_device(&sch->dev);
sch               956 drivers/s390/cio/device.c 	mutex_lock(&sch->reg_mutex);
sch               957 drivers/s390/cio/device.c 	rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
sch               958 drivers/s390/cio/device.c 	mutex_unlock(&sch->reg_mutex);
sch               962 drivers/s390/cio/device.c 			      cdev->private->dev_id.devno, sch->schid.ssid,
sch               963 drivers/s390/cio/device.c 			      sch->schib.pmcw.dev, rc);
sch               971 drivers/s390/cio/device.c 		put_device(&sch->dev);
sch               984 drivers/s390/cio/device.c 	spin_lock_irq(sch->lock);
sch               985 drivers/s390/cio/device.c 	cdev->ccwlock = sch->lock;
sch               986 drivers/s390/cio/device.c 	if (!sch_is_pseudo_sch(sch))
sch               987 drivers/s390/cio/device.c 		sch_set_cdev(sch, cdev);
sch               988 drivers/s390/cio/device.c 	spin_unlock_irq(sch->lock);
sch               989 drivers/s390/cio/device.c 	if (!sch_is_pseudo_sch(sch))
sch               990 drivers/s390/cio/device.c 		css_update_ssd_info(sch);
sch               996 drivers/s390/cio/device.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch               997 drivers/s390/cio/device.c 	struct channel_subsystem *css = to_css(sch->dev.parent);
sch              1002 drivers/s390/cio/device.c static void io_subchannel_irq(struct subchannel *sch)
sch              1006 drivers/s390/cio/device.c 	cdev = sch_get_cdev(sch);
sch              1009 drivers/s390/cio/device.c 	CIO_TRACE_EVENT(6, dev_name(&sch->dev));
sch              1016 drivers/s390/cio/device.c void io_subchannel_init_config(struct subchannel *sch)
sch              1018 drivers/s390/cio/device.c 	memset(&sch->config, 0, sizeof(sch->config));
sch              1019 drivers/s390/cio/device.c 	sch->config.csense = 1;
sch              1022 drivers/s390/cio/device.c static void io_subchannel_init_fields(struct subchannel *sch)
sch              1024 drivers/s390/cio/device.c 	if (cio_is_console(sch->schid))
sch              1025 drivers/s390/cio/device.c 		sch->opm = 0xff;
sch              1027 drivers/s390/cio/device.c 		sch->opm = chp_get_sch_opm(sch);
sch              1028 drivers/s390/cio/device.c 	sch->lpm = sch->schib.pmcw.pam & sch->opm;
sch              1029 drivers/s390/cio/device.c 	sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;
sch              1033 drivers/s390/cio/device.c 		      sch->schib.pmcw.dev, sch->schid.ssid,
sch              1034 drivers/s390/cio/device.c 		      sch->schid.sch_no, sch->schib.pmcw.pim,
sch              1035 drivers/s390/cio/device.c 		      sch->schib.pmcw.pam, sch->schib.pmcw.pom);
sch              1037 drivers/s390/cio/device.c 	io_subchannel_init_config(sch);
sch              1044 drivers/s390/cio/device.c static int io_subchannel_probe(struct subchannel *sch)
sch              1050 drivers/s390/cio/device.c 	if (cio_is_console(sch->schid)) {
sch              1051 drivers/s390/cio/device.c 		rc = sysfs_create_group(&sch->dev.kobj,
sch              1057 drivers/s390/cio/device.c 				      sch->schid.ssid, sch->schid.sch_no, rc);
sch              1063 drivers/s390/cio/device.c 		if (dev_get_uevent_suppress(&sch->dev)) {
sch              1065 drivers/s390/cio/device.c 			dev_set_uevent_suppress(&sch->dev, 0);
sch              1066 drivers/s390/cio/device.c 			kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
sch              1068 drivers/s390/cio/device.c 		cdev = sch_get_cdev(sch);
sch              1079 drivers/s390/cio/device.c 	io_subchannel_init_fields(sch);
sch              1080 drivers/s390/cio/device.c 	rc = cio_commit_config(sch);
sch              1083 drivers/s390/cio/device.c 	rc = sysfs_create_group(&sch->dev.kobj,
sch              1092 drivers/s390/cio/device.c 	io_priv->dma_area = dma_alloc_coherent(&sch->dev,
sch              1100 drivers/s390/cio/device.c 	set_io_private(sch, io_priv);
sch              1101 drivers/s390/cio/device.c 	css_schedule_eval(sch->schid);
sch              1105 drivers/s390/cio/device.c 	spin_lock_irq(sch->lock);
sch              1106 drivers/s390/cio/device.c 	css_sched_sch_todo(sch, SCH_TODO_UNREG);
sch              1107 drivers/s390/cio/device.c 	spin_unlock_irq(sch->lock);
sch              1111 drivers/s390/cio/device.c static int io_subchannel_remove(struct subchannel *sch)
sch              1113 drivers/s390/cio/device.c 	struct io_subchannel_private *io_priv = to_io_private(sch);
sch              1116 drivers/s390/cio/device.c 	cdev = sch_get_cdev(sch);
sch              1121 drivers/s390/cio/device.c 	spin_lock_irq(sch->lock);
sch              1122 drivers/s390/cio/device.c 	sch_set_cdev(sch, NULL);
sch              1123 drivers/s390/cio/device.c 	set_io_private(sch, NULL);
sch              1124 drivers/s390/cio/device.c 	spin_unlock_irq(sch->lock);
sch              1126 drivers/s390/cio/device.c 	dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
sch              1129 drivers/s390/cio/device.c 	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
sch              1133 drivers/s390/cio/device.c static void io_subchannel_verify(struct subchannel *sch)
sch              1137 drivers/s390/cio/device.c 	cdev = sch_get_cdev(sch);
sch              1142 drivers/s390/cio/device.c static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
sch              1146 drivers/s390/cio/device.c 	cdev = sch_get_cdev(sch);
sch              1149 drivers/s390/cio/device.c 	if (cio_update_schib(sch))
sch              1152 drivers/s390/cio/device.c 	if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
sch              1158 drivers/s390/cio/device.c 	if (cio_clear(sch))
sch              1169 drivers/s390/cio/device.c static int io_subchannel_chp_event(struct subchannel *sch,
sch              1172 drivers/s390/cio/device.c 	struct ccw_device *cdev = sch_get_cdev(sch);
sch              1175 drivers/s390/cio/device.c 	mask = chp_ssd_get_mask(&sch->ssd_info, link);
sch              1180 drivers/s390/cio/device.c 		sch->opm &= ~mask;
sch              1181 drivers/s390/cio/device.c 		sch->lpm &= ~mask;
sch              1184 drivers/s390/cio/device.c 		io_subchannel_terminate_path(sch, mask);
sch              1187 drivers/s390/cio/device.c 		sch->opm |= mask;
sch              1188 drivers/s390/cio/device.c 		sch->lpm |= mask;
sch              1191 drivers/s390/cio/device.c 		io_subchannel_verify(sch);
sch              1194 drivers/s390/cio/device.c 		if (cio_update_schib(sch))
sch              1198 drivers/s390/cio/device.c 		io_subchannel_terminate_path(sch, mask);
sch              1201 drivers/s390/cio/device.c 		if (cio_update_schib(sch))
sch              1203 drivers/s390/cio/device.c 		sch->lpm |= mask & sch->opm;
sch              1206 drivers/s390/cio/device.c 		io_subchannel_verify(sch);
sch              1212 drivers/s390/cio/device.c static void io_subchannel_quiesce(struct subchannel *sch)
sch              1217 drivers/s390/cio/device.c 	spin_lock_irq(sch->lock);
sch              1218 drivers/s390/cio/device.c 	cdev = sch_get_cdev(sch);
sch              1219 drivers/s390/cio/device.c 	if (cio_is_console(sch->schid))
sch              1221 drivers/s390/cio/device.c 	if (!sch->schib.pmcw.ena)
sch              1223 drivers/s390/cio/device.c 	ret = cio_disable_subchannel(sch);
sch              1234 drivers/s390/cio/device.c 			spin_unlock_irq(sch->lock);
sch              1237 drivers/s390/cio/device.c 			spin_lock_irq(sch->lock);
sch              1239 drivers/s390/cio/device.c 		ret = cio_disable_subchannel(sch);
sch              1242 drivers/s390/cio/device.c 	spin_unlock_irq(sch->lock);
sch              1245 drivers/s390/cio/device.c static void io_subchannel_shutdown(struct subchannel *sch)
sch              1247 drivers/s390/cio/device.c 	io_subchannel_quiesce(sch);
sch              1261 drivers/s390/cio/device.c 	struct subchannel *sch;
sch              1267 drivers/s390/cio/device.c 		sch = to_subchannel(cdev->dev.parent);
sch              1268 drivers/s390/cio/device.c 		if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm)
sch              1376 drivers/s390/cio/device.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch              1379 drivers/s390/cio/device.c 	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
sch              1381 drivers/s390/cio/device.c 	cio_disable_subchannel(sch);
sch              1397 drivers/s390/cio/device.c static enum io_sch_action sch_get_action(struct subchannel *sch)
sch              1401 drivers/s390/cio/device.c 	cdev = sch_get_cdev(sch);
sch              1402 drivers/s390/cio/device.c 	if (cio_update_schib(sch)) {
sch              1413 drivers/s390/cio/device.c 	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
sch              1418 drivers/s390/cio/device.c 	if ((sch->schib.pmcw.pam & sch->opm) == 0) {
sch              1442 drivers/s390/cio/device.c static int io_subchannel_sch_event(struct subchannel *sch, int process)
sch              1450 drivers/s390/cio/device.c 	spin_lock_irqsave(sch->lock, flags);
sch              1451 drivers/s390/cio/device.c 	if (!device_is_registered(&sch->dev))
sch              1453 drivers/s390/cio/device.c 	if (work_pending(&sch->todo_work))
sch              1455 drivers/s390/cio/device.c 	cdev = sch_get_cdev(sch);
sch              1458 drivers/s390/cio/device.c 	action = sch_get_action(sch);
sch              1460 drivers/s390/cio/device.c 		      sch->schid.ssid, sch->schid.sch_no, process,
sch              1471 drivers/s390/cio/device.c 		io_subchannel_verify(sch);
sch              1502 drivers/s390/cio/device.c 	spin_unlock_irqrestore(sch->lock, flags);
sch              1516 drivers/s390/cio/device.c 		spin_lock_irqsave(sch->lock, flags);
sch              1522 drivers/s390/cio/device.c 		sch_set_cdev(sch, NULL);
sch              1523 drivers/s390/cio/device.c 		spin_unlock_irqrestore(sch->lock, flags);
sch              1535 drivers/s390/cio/device.c 			css_sch_device_unregister(sch);
sch              1540 drivers/s390/cio/device.c 		dev_id.ssid = sch->schid.ssid;
sch              1541 drivers/s390/cio/device.c 		dev_id.devno = sch->schib.pmcw.dev;
sch              1544 drivers/s390/cio/device.c 			sch_create_and_recog_new_device(sch);
sch              1547 drivers/s390/cio/device.c 		rc = ccw_device_move_to_sch(cdev, sch);
sch              1553 drivers/s390/cio/device.c 		spin_lock_irqsave(sch->lock, flags);
sch              1555 drivers/s390/cio/device.c 		spin_unlock_irqrestore(sch->lock, flags);
sch              1565 drivers/s390/cio/device.c 	spin_unlock_irqrestore(sch->lock, flags);
sch              1585 drivers/s390/cio/device.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch              1591 drivers/s390/cio/device.c 	io_subchannel_init_fields(sch);
sch              1592 drivers/s390/cio/device.c 	rc = cio_commit_config(sch);
sch              1595 drivers/s390/cio/device.c 	sch->driver = &io_subchannel_driver;
sch              1596 drivers/s390/cio/device.c 	io_subchannel_recog(cdev, sch);
sch              1626 drivers/s390/cio/device.c 	struct subchannel *sch;
sch              1628 drivers/s390/cio/device.c 	sch = cio_probe_console();
sch              1629 drivers/s390/cio/device.c 	if (IS_ERR(sch))
sch              1630 drivers/s390/cio/device.c 		return ERR_CAST(sch);
sch              1635 drivers/s390/cio/device.c 	io_priv->dma_area = dma_alloc_coherent(&sch->dev,
sch              1640 drivers/s390/cio/device.c 	set_io_private(sch, io_priv);
sch              1641 drivers/s390/cio/device.c 	cdev = io_subchannel_create_ccwdev(sch);
sch              1643 drivers/s390/cio/device.c 		dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
sch              1645 drivers/s390/cio/device.c 		set_io_private(sch, NULL);
sch              1646 drivers/s390/cio/device.c 		put_device(&sch->dev);
sch              1657 drivers/s390/cio/device.c 	put_device(&sch->dev);
sch              1663 drivers/s390/cio/device.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch              1664 drivers/s390/cio/device.c 	struct io_subchannel_private *io_priv = to_io_private(sch);
sch              1666 drivers/s390/cio/device.c 	set_io_private(sch, NULL);
sch              1667 drivers/s390/cio/device.c 	put_device(&sch->dev);
sch              1669 drivers/s390/cio/device.c 	dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
sch              1684 drivers/s390/cio/device.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch              1687 drivers/s390/cio/device.c 		cio_tsch(sch);
sch              1688 drivers/s390/cio/device.c 		if (sch->schib.scsw.cmd.actl == 0)
sch              1757 drivers/s390/cio/device.c 	struct subchannel *sch;
sch              1783 drivers/s390/cio/device.c 	sch = to_subchannel(cdev->dev.parent);
sch              1785 drivers/s390/cio/device.c 	io_subchannel_quiesce(sch);
sch              1828 drivers/s390/cio/device.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch              1842 drivers/s390/cio/device.c 	spin_lock_irq(sch->lock);
sch              1844 drivers/s390/cio/device.c 	spin_unlock_irq(sch->lock);
sch              1852 drivers/s390/cio/device.c 	spin_lock_irq(sch->lock);
sch              1853 drivers/s390/cio/device.c 	ret = cio_disable_subchannel(sch);
sch              1854 drivers/s390/cio/device.c 	spin_unlock_irq(sch->lock);
sch              1862 drivers/s390/cio/device.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch              1868 drivers/s390/cio/device.c 	spin_lock_irq(sch->lock);
sch              1870 drivers/s390/cio/device.c 	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
sch              1872 drivers/s390/cio/device.c 	spin_unlock_irq(sch->lock);
sch              1890 drivers/s390/cio/device.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch              1892 drivers/s390/cio/device.c 	spin_lock_irq(sch->lock);
sch              1893 drivers/s390/cio/device.c 	if (cio_is_console(sch->schid)) {
sch              1894 drivers/s390/cio/device.c 		cio_enable_subchannel(sch, (u32)(addr_t)sch);
sch              1903 drivers/s390/cio/device.c 	css_sched_sch_todo(sch, SCH_TODO_EVAL);
sch              1904 drivers/s390/cio/device.c 	spin_unlock_irq(sch->lock);
sch              1908 drivers/s390/cio/device.c 	sch = to_subchannel(cdev->dev.parent);
sch              1909 drivers/s390/cio/device.c 	spin_lock_irq(sch->lock);
sch              1915 drivers/s390/cio/device.c 	spin_unlock_irq(sch->lock);
sch              1918 drivers/s390/cio/device.c 	spin_lock_irq(sch->lock);
sch              1922 drivers/s390/cio/device.c 	spin_unlock_irq(sch->lock);
sch              1946 drivers/s390/cio/device.c 	struct subchannel *sch;
sch              1950 drivers/s390/cio/device.c 	sch = to_subchannel(cdev->dev.parent);
sch              1951 drivers/s390/cio/device.c 	spin_lock_irq(sch->lock);
sch              1952 drivers/s390/cio/device.c 	if (cio_is_console(sch->schid))
sch              1988 drivers/s390/cio/device.c 	spin_unlock_irq(sch->lock);
sch              1990 drivers/s390/cio/device.c 	spin_lock_irq(sch->lock);
sch              2000 drivers/s390/cio/device.c 		spin_unlock_irq(sch->lock);
sch              2002 drivers/s390/cio/device.c 		spin_lock_irq(sch->lock);
sch              2012 drivers/s390/cio/device.c 	spin_unlock_irq(sch->lock);
sch              2018 drivers/s390/cio/device.c 	spin_unlock_irq(sch->lock);
sch              2072 drivers/s390/cio/device.c 	struct subchannel *sch;
sch              2077 drivers/s390/cio/device.c 	sch = to_subchannel(cdev->dev.parent);
sch              2097 drivers/s390/cio/device.c 		if (!sch_is_pseudo_sch(sch))
sch              2098 drivers/s390/cio/device.c 			css_schedule_eval(sch->schid);
sch              2101 drivers/s390/cio/device.c 		if (sch_is_pseudo_sch(sch))
sch              2148 drivers/s390/cio/device.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch              2150 drivers/s390/cio/device.c 	return chsc_siosl(sch->schid);
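Note: the device.c hits above repeat one idiom worth calling out: take sch->lock, enable or disable the subchannel, and pass the subchannel pointer itself, truncated to a u32 interruption parameter, so the interrupt handler can map an I/O interrupt back to its subchannel. A minimal sketch of that pattern, using only the calls visible above (the helper name is hypothetical, not a kernel function):

/* example_enable(): illustrative only, not part of the kernel API. */
static int example_enable(struct subchannel *sch)
{
	int ret;

	spin_lock_irq(sch->lock);
	/* intparm is the subchannel address itself; the IRQ handler
	 * uses it to find this subchannel again */
	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
	spin_unlock_irq(sch->lock);
	return ret;
}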
sch                88 drivers/s390/cio/device.h void io_subchannel_init_config(struct subchannel *sch);
sch                40 drivers/s390/cio/device_fsm.c 	struct subchannel *sch;
sch                45 drivers/s390/cio/device_fsm.c 	sch = to_subchannel(cdev->dev.parent);
sch                46 drivers/s390/cio/device_fsm.c 	private = to_io_private(sch);
sch                48 drivers/s390/cio/device_fsm.c 	cc = stsch(sch->schid, &schib);
sch                58 drivers/s390/cio/device_fsm.c 	       dev_name(&sch->dev));
sch                60 drivers/s390/cio/device_fsm.c 	       "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);
sch               131 drivers/s390/cio/device_fsm.c 	struct subchannel *sch;
sch               134 drivers/s390/cio/device_fsm.c 	sch = to_subchannel(cdev->dev.parent);
sch               135 drivers/s390/cio/device_fsm.c 	ret = cio_cancel_halt_clear(sch, &cdev->private->iretry);
sch               172 drivers/s390/cio/device_fsm.c __recover_lost_chpids(struct subchannel *sch, int old_lpm)
sch               180 drivers/s390/cio/device_fsm.c 		if (!(sch->lpm & mask))
sch               184 drivers/s390/cio/device_fsm.c 		chpid.id = sch->schib.pmcw.chpid[i];
sch               196 drivers/s390/cio/device_fsm.c 	struct subchannel *sch;
sch               199 drivers/s390/cio/device_fsm.c 	sch = to_subchannel(cdev->dev.parent);
sch               201 drivers/s390/cio/device_fsm.c 	if (cio_disable_subchannel(sch))
sch               207 drivers/s390/cio/device_fsm.c 	old_lpm = sch->lpm;
sch               210 drivers/s390/cio/device_fsm.c 	if (cio_update_schib(sch))
sch               213 drivers/s390/cio/device_fsm.c 		sch->lpm = sch->schib.pmcw.pam & sch->opm;
sch               218 drivers/s390/cio/device_fsm.c 	if (sch->lpm != old_lpm)
sch               219 drivers/s390/cio/device_fsm.c 		__recover_lost_chpids(sch, old_lpm);
sch               321 drivers/s390/cio/device_fsm.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch               327 drivers/s390/cio/device_fsm.c 		cdev->private->path_new_mask = sch->vpm;
sch               341 drivers/s390/cio/device_fsm.c 	struct subchannel *sch;
sch               343 drivers/s390/cio/device_fsm.c 	sch = to_subchannel(cdev->dev.parent);
sch               348 drivers/s390/cio/device_fsm.c 		cio_disable_subchannel(sch);
sch               358 drivers/s390/cio/device_fsm.c 			      cdev->private->dev_id.devno, sch->schid.sch_no);
sch               366 drivers/s390/cio/device_fsm.c 			      cdev->private->dev_id.devno, sch->schid.sch_no);
sch               376 drivers/s390/cio/device_fsm.c 			      sch->schid.sch_no);
sch               400 drivers/s390/cio/device_fsm.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch               412 drivers/s390/cio/device_fsm.c 	if (cio_enable_subchannel(sch, (u32) (addr_t) sch)) {
sch               441 drivers/s390/cio/device_fsm.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch               447 drivers/s390/cio/device_fsm.c 		if (mask & cdev->private->path_gone_mask & ~(sch->vpm))
sch               449 drivers/s390/cio/device_fsm.c 		if (mask & cdev->private->path_new_mask & sch->vpm)
sch               451 drivers/s390/cio/device_fsm.c 		if (mask & cdev->private->pgid_reset_mask & sch->vpm)
sch               486 drivers/s390/cio/device_fsm.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch               487 drivers/s390/cio/device_fsm.c 	u8 broken_paths = (sch->schib.pmcw.pam & sch->opm) ^ sch->vpm;
sch               497 drivers/s390/cio/device_fsm.c 	struct subchannel *sch;
sch               499 drivers/s390/cio/device_fsm.c 	sch = to_subchannel(cdev->dev.parent);
sch               501 drivers/s390/cio/device_fsm.c 	if (cio_update_schib(sch)) {
sch               506 drivers/s390/cio/device_fsm.c 	sch->lpm = sch->vpm;
sch               556 drivers/s390/cio/device_fsm.c 	struct subchannel *sch;
sch               562 drivers/s390/cio/device_fsm.c 	sch = to_subchannel(cdev->dev.parent);
sch               563 drivers/s390/cio/device_fsm.c 	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
sch               599 drivers/s390/cio/device_fsm.c 	struct subchannel *sch;
sch               616 drivers/s390/cio/device_fsm.c 	sch = to_subchannel(cdev->dev.parent);
sch               617 drivers/s390/cio/device_fsm.c 	if (cio_update_schib(sch))
sch               619 drivers/s390/cio/device_fsm.c 	if (scsw_actl(&sch->schib.scsw) != 0)
sch               653 drivers/s390/cio/device_fsm.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch               655 drivers/s390/cio/device_fsm.c 	css_schedule_eval(sch->schid);
sch               664 drivers/s390/cio/device_fsm.c 	struct subchannel *sch;
sch               670 drivers/s390/cio/device_fsm.c 	sch = to_subchannel(cdev->dev.parent);
sch               675 drivers/s390/cio/device_fsm.c 	if (cio_update_schib(sch)) {
sch               680 drivers/s390/cio/device_fsm.c 	if (scsw_actl(&sch->schib.scsw) != 0 ||
sch               681 drivers/s390/cio/device_fsm.c 	    (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
sch               703 drivers/s390/cio/device_fsm.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch               706 drivers/s390/cio/device_fsm.c 		if (cio_enable_subchannel(sch, (u32) (addr_t) sch))
sch               711 drivers/s390/cio/device_fsm.c 		css_schedule_eval(sch->schid);
sch               934 drivers/s390/cio/device_fsm.c 	struct subchannel *sch;
sch               936 drivers/s390/cio/device_fsm.c 	sch = to_subchannel(cdev->dev.parent);
sch               937 drivers/s390/cio/device_fsm.c 	if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0)
sch               946 drivers/s390/cio/device_fsm.c 	struct subchannel *sch;
sch               951 drivers/s390/cio/device_fsm.c 	sch = to_subchannel(cdev->dev.parent);
sch               953 drivers/s390/cio/device_fsm.c 	if (cio_update_schib(sch))
sch               959 drivers/s390/cio/device_fsm.c 	sch->lpm = sch->schib.pmcw.pam & sch->opm;
sch               964 drivers/s390/cio/device_fsm.c 	io_subchannel_init_config(sch);
sch               965 drivers/s390/cio/device_fsm.c 	if (cio_commit_config(sch))
sch               970 drivers/s390/cio/device_fsm.c 	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno)
sch               971 drivers/s390/cio/device_fsm.c 		css_schedule_eval(sch->schid);
sch               979 drivers/s390/cio/device_fsm.c 	struct subchannel *sch;
sch               981 drivers/s390/cio/device_fsm.c 	sch = to_subchannel(cdev->dev.parent);
sch               986 drivers/s390/cio/device_fsm.c 	cio_disable_subchannel(sch);
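Note: several device_fsm.c hits above rederive the logical path mask after refreshing the subchannel information block. A reduced sketch of that step, assuming only the fields and calls shown above (the wrapper name is hypothetical):

/* Illustrative wrapper around the schib-refresh idiom in device_fsm.c. */
static int example_refresh_paths(struct subchannel *sch)
{
	if (cio_update_schib(sch))
		return -ENODEV;	/* subchannel vanished */
	/* usable paths = physically available (pam) masked by the
	 * operator-enabled paths (opm) */
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	return 0;
}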
sch               203 drivers/s390/cio/device_id.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch               221 drivers/s390/cio/device_id.c 	req->lpm	= sch->schib.pmcw.pam & sch->opm;
sch               139 drivers/s390/cio/device_ops.c 	struct subchannel *sch;
sch               144 drivers/s390/cio/device_ops.c 	sch = to_subchannel(cdev->dev.parent);
sch               145 drivers/s390/cio/device_ops.c 	if (!sch->schib.pmcw.ena)
sch               153 drivers/s390/cio/device_ops.c 	ret = cio_clear(sch);
sch               195 drivers/s390/cio/device_ops.c 	struct subchannel *sch;
sch               200 drivers/s390/cio/device_ops.c 	sch = to_subchannel(cdev->dev.parent);
sch               201 drivers/s390/cio/device_ops.c 	if (!sch->schib.pmcw.ena)
sch               216 drivers/s390/cio/device_ops.c 	    ((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
sch               217 drivers/s390/cio/device_ops.c 	     !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) ||
sch               220 drivers/s390/cio/device_ops.c 	ret = cio_set_options (sch, flags);
sch               225 drivers/s390/cio/device_ops.c 		lpm &= sch->lpm;
sch               229 drivers/s390/cio/device_ops.c 	ret = cio_start_key (sch, cpa, lpm, key);
sch               372 drivers/s390/cio/device_ops.c 	struct subchannel *sch;
sch               377 drivers/s390/cio/device_ops.c 	sch = to_subchannel(cdev->dev.parent);
sch               378 drivers/s390/cio/device_ops.c 	if (!sch->schib.pmcw.ena)
sch               386 drivers/s390/cio/device_ops.c 	ret = cio_halt(sch);
sch               407 drivers/s390/cio/device_ops.c 	struct subchannel *sch;
sch               411 drivers/s390/cio/device_ops.c 	sch = to_subchannel(cdev->dev.parent);
sch               412 drivers/s390/cio/device_ops.c 	if (!sch->schib.pmcw.ena)
sch               417 drivers/s390/cio/device_ops.c 	    !(sch->schib.scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
sch               419 drivers/s390/cio/device_ops.c 	return cio_resume(sch);
sch               457 drivers/s390/cio/device_ops.c 	struct subchannel *sch;
sch               462 drivers/s390/cio/device_ops.c 	sch = to_subchannel(cdev->dev.parent);
sch               463 drivers/s390/cio/device_ops.c 	return sch->lpm;
sch               477 drivers/s390/cio/device_ops.c 	struct subchannel *sch;
sch               480 drivers/s390/cio/device_ops.c 	sch = to_subchannel(cdev->dev.parent);
sch               482 drivers/s390/cio/device_ops.c 	chpid.id = sch->schib.pmcw.chpid[chp_idx];
sch               496 drivers/s390/cio/device_ops.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch               502 drivers/s390/cio/device_ops.c 	chpid.id = sch->schib.pmcw.chpid[chp_idx];
sch               543 drivers/s390/cio/device_ops.c 	struct subchannel *sch;
sch               546 drivers/s390/cio/device_ops.c 	sch = to_subchannel(cdev->dev.parent);
sch               547 drivers/s390/cio/device_ops.c 	if (!sch->schib.pmcw.ena)
sch               563 drivers/s390/cio/device_ops.c 		lpm &= sch->lpm;
sch               567 drivers/s390/cio/device_ops.c 	rc = cio_tm_start_key(sch, tcw, lpm, key);
sch               642 drivers/s390/cio/device_ops.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch               649 drivers/s390/cio/device_ops.c 		mask &= sch->lpm;
sch               651 drivers/s390/cio/device_ops.c 		mask = sch->lpm;
sch               657 drivers/s390/cio/device_ops.c 		chpid.id = sch->schib.pmcw.chpid[i];
sch               687 drivers/s390/cio/device_ops.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch               689 drivers/s390/cio/device_ops.c 	if (!sch->schib.pmcw.ena)
sch               693 drivers/s390/cio/device_ops.c 	if (!scsw_is_tm(&sch->schib.scsw) ||
sch               694 drivers/s390/cio/device_ops.c 	    !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_START_PEND))
sch               696 drivers/s390/cio/device_ops.c 	return cio_tm_intrg(sch);
sch               707 drivers/s390/cio/device_ops.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch               709 drivers/s390/cio/device_ops.c 	*schid = sch->schid;
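Note: every channel-program primitive in device_ops.c above follows the same guard: resolve the parent subchannel from cdev->dev.parent, refuse if the subchannel is not enabled, restrict any caller-supplied path mask to sch->lpm, then issue the cio_* call. A condensed sketch of the guard (hypothetical helper name):

/* Guard pattern from device_ops.c, condensed for illustration. */
static int example_clear(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	if (!sch->schib.pmcw.ena)
		return -EINVAL;	/* subchannel not enabled: no I/O */
	return cio_clear(sch);
}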
sch                35 drivers/s390/cio/device_pgid.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch                43 drivers/s390/cio/device_pgid.c 	if (sch->config.mp != mpath) {
sch                44 drivers/s390/cio/device_pgid.c 		sch->config.mp = mpath;
sch                45 drivers/s390/cio/device_pgid.c 		rc = cio_commit_config(sch);
sch                50 drivers/s390/cio/device_pgid.c 			 sch->vpm);
sch                74 drivers/s390/cio/device_pgid.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch                77 drivers/s390/cio/device_pgid.c 	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm &
sch                86 drivers/s390/cio/device_pgid.c 	verify_done(cdev, sch->vpm ? 0 : -EACCES);
sch               106 drivers/s390/cio/device_pgid.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch               111 drivers/s390/cio/device_pgid.c 		sch->vpm |= req->lpm;
sch               169 drivers/s390/cio/device_pgid.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch               182 drivers/s390/cio/device_pgid.c 	req->lpm	= sch->schib.pmcw.pam;
sch               196 drivers/s390/cio/device_pgid.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch               205 drivers/s390/cio/device_pgid.c 	if (req->lpm & sch->opm)
sch               221 drivers/s390/cio/device_pgid.c 	verify_done(cdev, sch->vpm ? 0 : -EACCES);
sch               229 drivers/s390/cio/device_pgid.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch               234 drivers/s390/cio/device_pgid.c 		sch->vpm |= req->lpm & sch->opm;
sch               334 drivers/s390/cio/device_pgid.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch               346 drivers/s390/cio/device_pgid.c 		if (sch->opm & lpm) {
sch               381 drivers/s390/cio/device_pgid.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch               397 drivers/s390/cio/device_pgid.c 		sch->vpm = donepm & sch->opm;
sch               406 drivers/s390/cio/device_pgid.c 		      id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
sch               416 drivers/s390/cio/device_pgid.c 			verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
sch               455 drivers/s390/cio/device_pgid.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch               459 drivers/s390/cio/device_pgid.c 	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam &
sch               512 drivers/s390/cio/device_pgid.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch               516 drivers/s390/cio/device_pgid.c 	sch->vpm = 0;
sch               517 drivers/s390/cio/device_pgid.c 	sch->lpm = sch->schib.pmcw.pam;
sch               523 drivers/s390/cio/device_pgid.c 	cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
sch               576 drivers/s390/cio/device_pgid.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch               583 drivers/s390/cio/device_pgid.c 	if (sch->config.mp) {
sch               584 drivers/s390/cio/device_pgid.c 		sch->config.mp = 0;
sch               585 drivers/s390/cio/device_pgid.c 		rc = cio_commit_config(sch);
sch               603 drivers/s390/cio/device_pgid.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch               613 drivers/s390/cio/device_pgid.c 	req->lpm	= sch->schib.pmcw.pam & sch->opm;
sch               664 drivers/s390/cio/device_pgid.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch               673 drivers/s390/cio/device_pgid.c 	req->lpm	= sch->schib.pmcw.pam & sch->opm;
sch               685 drivers/s390/cio/device_pgid.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch               700 drivers/s390/cio/device_pgid.c 	spin_lock_irq(sch->lock);
sch               701 drivers/s390/cio/device_pgid.c 	rc = cio_enable_subchannel(sch, (u32) (addr_t) sch);
sch               707 drivers/s390/cio/device_pgid.c 	spin_unlock_irq(sch->lock);
sch               711 drivers/s390/cio/device_pgid.c 		spin_lock_irq(sch->lock);
sch               713 drivers/s390/cio/device_pgid.c 		spin_unlock_irq(sch->lock);
sch               718 drivers/s390/cio/device_pgid.c 	spin_lock_irq(sch->lock);
sch               719 drivers/s390/cio/device_pgid.c 	cio_disable_subchannel(sch);
sch               722 drivers/s390/cio/device_pgid.c 	spin_unlock_irq(sch->lock);
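Note: the device_pgid.c callbacks above verify paths one at a time and accumulate the successes into sch->vpm; -EACCES is reported only when nothing verified. A one-line sketch of the accumulation step (hypothetical helper):

/* Record one verified path, as the PGID callbacks above do. */
static void example_path_verified(struct subchannel *sch, u8 lpm)
{
	sch->vpm |= lpm & sch->opm;	/* keep operator-enabled paths only */
}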
sch                30 drivers/s390/cio/device_status.c 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
sch                41 drivers/s390/cio/device_status.c 		      cdev->private->dev_id.devno, sch->schid.ssid,
sch                42 drivers/s390/cio/device_status.c 		      sch->schid.sch_no,
sch                44 drivers/s390/cio/device_status.c 	sprintf(dbf_text, "chk%x", sch->schid.sch_no);
sch                55 drivers/s390/cio/device_status.c 	struct subchannel *sch;
sch                57 drivers/s390/cio/device_status.c 	sch = to_subchannel(cdev->dev.parent);
sch                58 drivers/s390/cio/device_status.c 	if (cio_update_schib(sch))
sch                63 drivers/s390/cio/device_status.c 		      sch->schid.ssid, sch->schid.sch_no,
sch                64 drivers/s390/cio/device_status.c 		      sch->schib.pmcw.pnom);
sch                66 drivers/s390/cio/device_status.c 	sch->lpm &= ~sch->schib.pmcw.pnom;
sch               313 drivers/s390/cio/device_status.c 	struct subchannel *sch;
sch               317 drivers/s390/cio/device_status.c 	sch = to_subchannel(cdev->dev.parent);
sch               332 drivers/s390/cio/device_status.c 	sense_ccw = &to_io_private(sch)->dma_area->sense_ccw;
sch               338 drivers/s390/cio/device_status.c 	rc = cio_start(sch, sense_ccw, 0xff);
sch                59 drivers/s390/cio/eadm_sch.c static int eadm_subchannel_start(struct subchannel *sch, struct aob *aob)
sch                61 drivers/s390/cio/eadm_sch.c 	union orb *orb = &get_eadm_private(sch)->orb;
sch                66 drivers/s390/cio/eadm_sch.c 	orb->eadm.intparm = (u32)(addr_t)sch;
sch                70 drivers/s390/cio/eadm_sch.c 	EADM_LOG_HEX(6, &sch->schid, sizeof(sch->schid));
sch                72 drivers/s390/cio/eadm_sch.c 	cc = ssch(sch->schid, orb);
sch                75 drivers/s390/cio/eadm_sch.c 		sch->schib.scsw.eadm.actl |= SCSW_ACTL_START_PEND;
sch                86 drivers/s390/cio/eadm_sch.c static int eadm_subchannel_clear(struct subchannel *sch)
sch                90 drivers/s390/cio/eadm_sch.c 	cc = csch(sch->schid);
sch                94 drivers/s390/cio/eadm_sch.c 	sch->schib.scsw.eadm.actl |= SCSW_ACTL_CLEAR_PEND;
sch               101 drivers/s390/cio/eadm_sch.c 	struct subchannel *sch = private->sch;
sch               103 drivers/s390/cio/eadm_sch.c 	spin_lock_irq(sch->lock);
sch               105 drivers/s390/cio/eadm_sch.c 	EADM_LOG_HEX(1, &sch->schid, sizeof(sch->schid));
sch               106 drivers/s390/cio/eadm_sch.c 	if (eadm_subchannel_clear(sch))
sch               108 drivers/s390/cio/eadm_sch.c 	spin_unlock_irq(sch->lock);
sch               111 drivers/s390/cio/eadm_sch.c static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires)
sch               113 drivers/s390/cio/eadm_sch.c 	struct eadm_private *private = get_eadm_private(sch);
sch               127 drivers/s390/cio/eadm_sch.c static void eadm_subchannel_irq(struct subchannel *sch)
sch               129 drivers/s390/cio/eadm_sch.c 	struct eadm_private *private = get_eadm_private(sch);
sch               130 drivers/s390/cio/eadm_sch.c 	struct eadm_scsw *scsw = &sch->schib.scsw.eadm;
sch               146 drivers/s390/cio/eadm_sch.c 	eadm_subchannel_set_timeout(sch, 0);
sch               152 drivers/s390/cio/eadm_sch.c 		css_sched_sch_todo(sch, SCH_TODO_EVAL);
sch               165 drivers/s390/cio/eadm_sch.c 	struct subchannel *sch;
sch               170 drivers/s390/cio/eadm_sch.c 		sch = private->sch;
sch               171 drivers/s390/cio/eadm_sch.c 		spin_lock(sch->lock);
sch               175 drivers/s390/cio/eadm_sch.c 			spin_unlock(sch->lock);
sch               178 drivers/s390/cio/eadm_sch.c 			return sch;
sch               180 drivers/s390/cio/eadm_sch.c 		spin_unlock(sch->lock);
sch               190 drivers/s390/cio/eadm_sch.c 	struct subchannel *sch;
sch               194 drivers/s390/cio/eadm_sch.c 	sch = eadm_get_idle_sch();
sch               195 drivers/s390/cio/eadm_sch.c 	if (!sch)
sch               198 drivers/s390/cio/eadm_sch.c 	spin_lock_irqsave(sch->lock, flags);
sch               199 drivers/s390/cio/eadm_sch.c 	eadm_subchannel_set_timeout(sch, EADM_TIMEOUT);
sch               200 drivers/s390/cio/eadm_sch.c 	ret = eadm_subchannel_start(sch, aob);
sch               205 drivers/s390/cio/eadm_sch.c 	eadm_subchannel_set_timeout(sch, 0);
sch               206 drivers/s390/cio/eadm_sch.c 	private = get_eadm_private(sch);
sch               208 drivers/s390/cio/eadm_sch.c 	css_sched_sch_todo(sch, SCH_TODO_EVAL);
sch               211 drivers/s390/cio/eadm_sch.c 	spin_unlock_irqrestore(sch->lock, flags);
sch               217 drivers/s390/cio/eadm_sch.c static int eadm_subchannel_probe(struct subchannel *sch)
sch               229 drivers/s390/cio/eadm_sch.c 	spin_lock_irq(sch->lock);
sch               230 drivers/s390/cio/eadm_sch.c 	set_eadm_private(sch, private);
sch               232 drivers/s390/cio/eadm_sch.c 	private->sch = sch;
sch               233 drivers/s390/cio/eadm_sch.c 	sch->isc = EADM_SCH_ISC;
sch               234 drivers/s390/cio/eadm_sch.c 	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
sch               236 drivers/s390/cio/eadm_sch.c 		set_eadm_private(sch, NULL);
sch               237 drivers/s390/cio/eadm_sch.c 		spin_unlock_irq(sch->lock);
sch               241 drivers/s390/cio/eadm_sch.c 	spin_unlock_irq(sch->lock);
sch               247 drivers/s390/cio/eadm_sch.c 	if (dev_get_uevent_suppress(&sch->dev)) {
sch               248 drivers/s390/cio/eadm_sch.c 		dev_set_uevent_suppress(&sch->dev, 0);
sch               249 drivers/s390/cio/eadm_sch.c 		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
sch               255 drivers/s390/cio/eadm_sch.c static void eadm_quiesce(struct subchannel *sch)
sch               257 drivers/s390/cio/eadm_sch.c 	struct eadm_private *private = get_eadm_private(sch);
sch               261 drivers/s390/cio/eadm_sch.c 	spin_lock_irq(sch->lock);
sch               265 drivers/s390/cio/eadm_sch.c 	if (eadm_subchannel_clear(sch))
sch               269 drivers/s390/cio/eadm_sch.c 	spin_unlock_irq(sch->lock);
sch               273 drivers/s390/cio/eadm_sch.c 	spin_lock_irq(sch->lock);
sch               277 drivers/s390/cio/eadm_sch.c 	eadm_subchannel_set_timeout(sch, 0);
sch               279 drivers/s390/cio/eadm_sch.c 		ret = cio_disable_subchannel(sch);
sch               282 drivers/s390/cio/eadm_sch.c 	spin_unlock_irq(sch->lock);
sch               285 drivers/s390/cio/eadm_sch.c static int eadm_subchannel_remove(struct subchannel *sch)
sch               287 drivers/s390/cio/eadm_sch.c 	struct eadm_private *private = get_eadm_private(sch);
sch               293 drivers/s390/cio/eadm_sch.c 	eadm_quiesce(sch);
sch               295 drivers/s390/cio/eadm_sch.c 	spin_lock_irq(sch->lock);
sch               296 drivers/s390/cio/eadm_sch.c 	set_eadm_private(sch, NULL);
sch               297 drivers/s390/cio/eadm_sch.c 	spin_unlock_irq(sch->lock);
sch               304 drivers/s390/cio/eadm_sch.c static void eadm_subchannel_shutdown(struct subchannel *sch)
sch               306 drivers/s390/cio/eadm_sch.c 	eadm_quiesce(sch);
sch               309 drivers/s390/cio/eadm_sch.c static int eadm_subchannel_freeze(struct subchannel *sch)
sch               311 drivers/s390/cio/eadm_sch.c 	return cio_disable_subchannel(sch);
sch               314 drivers/s390/cio/eadm_sch.c static int eadm_subchannel_restore(struct subchannel *sch)
sch               316 drivers/s390/cio/eadm_sch.c 	return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
sch               329 drivers/s390/cio/eadm_sch.c static int eadm_subchannel_sch_event(struct subchannel *sch, int process)
sch               334 drivers/s390/cio/eadm_sch.c 	spin_lock_irqsave(sch->lock, flags);
sch               335 drivers/s390/cio/eadm_sch.c 	if (!device_is_registered(&sch->dev))
sch               338 drivers/s390/cio/eadm_sch.c 	if (work_pending(&sch->todo_work))
sch               341 drivers/s390/cio/eadm_sch.c 	if (cio_update_schib(sch)) {
sch               342 drivers/s390/cio/eadm_sch.c 		css_sched_sch_todo(sch, SCH_TODO_UNREG);
sch               345 drivers/s390/cio/eadm_sch.c 	private = get_eadm_private(sch);
sch               350 drivers/s390/cio/eadm_sch.c 	spin_unlock_irqrestore(sch->lock, flags);
sch                15 drivers/s390/cio/eadm_sch.h 	struct subchannel *sch;
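Note: eadm_subchannel_start() above shows the standard ssch() condition-code handling. A sketch of the cc-to-errno mapping with the ORB setup trimmed; treat the mapping as an illustrative assumption rather than a definitive quote:

/* Condition-code handling around ssch(), per eadm_sch.c above. */
static int example_eadm_start(struct subchannel *sch, union orb *orb)
{
	int cc = ssch(sch->schid, orb);

	switch (cc) {
	case 0:
		/* accepted: mark the start function pending */
		sch->schib.scsw.eadm.actl |= SCSW_ACTL_START_PEND;
		return 0;
	case 1:		/* status pending */
	case 2:		/* busy */
		return -EBUSY;
	default:	/* not operational */
		return -ENODEV;
	}
}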
sch                32 drivers/s390/cio/io_sch.h static inline struct ccw_device *sch_get_cdev(struct subchannel *sch)
sch                34 drivers/s390/cio/io_sch.h 	struct io_subchannel_private *priv = to_io_private(sch);
sch                38 drivers/s390/cio/io_sch.h static inline void sch_set_cdev(struct subchannel *sch,
sch                41 drivers/s390/cio/io_sch.h 	struct io_subchannel_private *priv = to_io_private(sch);
sch               132 drivers/s390/cio/io_sch.h 	struct subchannel *sch;
sch               322 drivers/s390/cio/qdio_setup.c 	    (ssqd->qdio_ssqd.sch != schid->sch_no))
sch                36 drivers/s390/cio/vfio_ccw_drv.c int vfio_ccw_sch_quiesce(struct subchannel *sch)
sch                38 drivers/s390/cio/vfio_ccw_drv.c 	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
sch                42 drivers/s390/cio/vfio_ccw_drv.c 	spin_lock_irq(sch->lock);
sch                43 drivers/s390/cio/vfio_ccw_drv.c 	if (!sch->schib.pmcw.ena)
sch                45 drivers/s390/cio/vfio_ccw_drv.c 	ret = cio_disable_subchannel(sch);
sch                52 drivers/s390/cio/vfio_ccw_drv.c 		ret = cio_cancel_halt_clear(sch, &iretry);
sch                56 drivers/s390/cio/vfio_ccw_drv.c 			       sch->schid.ssid, sch->schid.sch_no);
sch                65 drivers/s390/cio/vfio_ccw_drv.c 		spin_unlock_irq(sch->lock);
sch                72 drivers/s390/cio/vfio_ccw_drv.c 		spin_lock_irq(sch->lock);
sch                73 drivers/s390/cio/vfio_ccw_drv.c 		ret = cio_disable_subchannel(sch);
sch                77 drivers/s390/cio/vfio_ccw_drv.c 	spin_unlock_irq(sch->lock);
sch               111 drivers/s390/cio/vfio_ccw_drv.c static void vfio_ccw_sch_irq(struct subchannel *sch)
sch               113 drivers/s390/cio/vfio_ccw_drv.c 	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
sch               119 drivers/s390/cio/vfio_ccw_drv.c static int vfio_ccw_sch_probe(struct subchannel *sch)
sch               121 drivers/s390/cio/vfio_ccw_drv.c 	struct pmcw *pmcw = &sch->schib.pmcw;
sch               126 drivers/s390/cio/vfio_ccw_drv.c 		dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
sch               127 drivers/s390/cio/vfio_ccw_drv.c 			 dev_name(&sch->dev));
sch               150 drivers/s390/cio/vfio_ccw_drv.c 	private->sch = sch;
sch               151 drivers/s390/cio/vfio_ccw_drv.c 	dev_set_drvdata(&sch->dev, private);
sch               154 drivers/s390/cio/vfio_ccw_drv.c 	spin_lock_irq(sch->lock);
sch               156 drivers/s390/cio/vfio_ccw_drv.c 	sch->isc = VFIO_CCW_ISC;
sch               157 drivers/s390/cio/vfio_ccw_drv.c 	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
sch               158 drivers/s390/cio/vfio_ccw_drv.c 	spin_unlock_irq(sch->lock);
sch               166 drivers/s390/cio/vfio_ccw_drv.c 	ret = vfio_ccw_mdev_reg(sch);
sch               170 drivers/s390/cio/vfio_ccw_drv.c 	if (dev_get_uevent_suppress(&sch->dev)) {
sch               171 drivers/s390/cio/vfio_ccw_drv.c 		dev_set_uevent_suppress(&sch->dev, 0);
sch               172 drivers/s390/cio/vfio_ccw_drv.c 		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
sch               176 drivers/s390/cio/vfio_ccw_drv.c 			   sch->schid.cssid, sch->schid.ssid,
sch               177 drivers/s390/cio/vfio_ccw_drv.c 			   sch->schid.sch_no);
sch               181 drivers/s390/cio/vfio_ccw_drv.c 	cio_disable_subchannel(sch);
sch               183 drivers/s390/cio/vfio_ccw_drv.c 	dev_set_drvdata(&sch->dev, NULL);
sch               193 drivers/s390/cio/vfio_ccw_drv.c static int vfio_ccw_sch_remove(struct subchannel *sch)
sch               195 drivers/s390/cio/vfio_ccw_drv.c 	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
sch               197 drivers/s390/cio/vfio_ccw_drv.c 	vfio_ccw_sch_quiesce(sch);
sch               199 drivers/s390/cio/vfio_ccw_drv.c 	vfio_ccw_mdev_unreg(sch);
sch               201 drivers/s390/cio/vfio_ccw_drv.c 	dev_set_drvdata(&sch->dev, NULL);
sch               209 drivers/s390/cio/vfio_ccw_drv.c 			   sch->schid.cssid, sch->schid.ssid,
sch               210 drivers/s390/cio/vfio_ccw_drv.c 			   sch->schid.sch_no);
sch               214 drivers/s390/cio/vfio_ccw_drv.c static void vfio_ccw_sch_shutdown(struct subchannel *sch)
sch               216 drivers/s390/cio/vfio_ccw_drv.c 	vfio_ccw_sch_quiesce(sch);
sch               229 drivers/s390/cio/vfio_ccw_drv.c static int vfio_ccw_sch_event(struct subchannel *sch, int process)
sch               231 drivers/s390/cio/vfio_ccw_drv.c 	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
sch               235 drivers/s390/cio/vfio_ccw_drv.c 	spin_lock_irqsave(sch->lock, flags);
sch               236 drivers/s390/cio/vfio_ccw_drv.c 	if (!device_is_registered(&sch->dev))
sch               239 drivers/s390/cio/vfio_ccw_drv.c 	if (work_pending(&sch->todo_work))
sch               242 drivers/s390/cio/vfio_ccw_drv.c 	if (cio_update_schib(sch)) {
sch               248 drivers/s390/cio/vfio_ccw_drv.c 	private = dev_get_drvdata(&sch->dev);
sch               256 drivers/s390/cio/vfio_ccw_drv.c 	spin_unlock_irqrestore(sch->lock, flags);
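Note: vfio_ccw_sch_quiesce() above layers cancel/halt/clear retries under the disable. A heavily condensed sketch of that control flow; the interrupt wait is elided (the real code drops sch->lock and waits for a completion between retries), so this is an illustration, not the implementation:

/* Condensed quiesce flow from vfio_ccw_drv.c; illustration only. */
static int example_quiesce(struct subchannel *sch)
{
	int iretry = 255;
	int ret = 0;

	spin_lock_irq(sch->lock);
	if (!sch->schib.pmcw.ena)
		goto out_unlock;
	ret = cio_disable_subchannel(sch);
	while (ret == -EBUSY) {
		if (cio_cancel_halt_clear(sch, &iretry) == -EIO)
			break;	/* real code logs and gives up here */
		/* real code: unlock, wait for the interrupt, relock */
		ret = cio_disable_subchannel(sch);
	}
out_unlock:
	spin_unlock_irq(sch->lock);
	return ret;
}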
sch                23 drivers/s390/cio/vfio_ccw_fsm.c 	struct subchannel *sch;
sch                30 drivers/s390/cio/vfio_ccw_fsm.c 	sch = private->sch;
sch                32 drivers/s390/cio/vfio_ccw_fsm.c 	spin_lock_irqsave(sch->lock, flags);
sch                34 drivers/s390/cio/vfio_ccw_fsm.c 	orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);
sch                41 drivers/s390/cio/vfio_ccw_fsm.c 	VFIO_CCW_TRACE_EVENT(5, dev_name(&sch->dev));
sch                44 drivers/s390/cio/vfio_ccw_fsm.c 	ccode = ssch(sch->schid, orb);
sch                53 drivers/s390/cio/vfio_ccw_fsm.c 		sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
sch                65 drivers/s390/cio/vfio_ccw_fsm.c 			sch->lpm &= ~lpm;
sch                67 drivers/s390/cio/vfio_ccw_fsm.c 			sch->lpm = 0;
sch                69 drivers/s390/cio/vfio_ccw_fsm.c 		if (cio_update_schib(sch))
sch                72 drivers/s390/cio/vfio_ccw_fsm.c 			ret = sch->lpm ? -EACCES : -ENODEV;
sch                79 drivers/s390/cio/vfio_ccw_fsm.c 	spin_unlock_irqrestore(sch->lock, flags);
sch                85 drivers/s390/cio/vfio_ccw_fsm.c 	struct subchannel *sch;
sch                90 drivers/s390/cio/vfio_ccw_fsm.c 	sch = private->sch;
sch                92 drivers/s390/cio/vfio_ccw_fsm.c 	spin_lock_irqsave(sch->lock, flags);
sch                95 drivers/s390/cio/vfio_ccw_fsm.c 	VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
sch                98 drivers/s390/cio/vfio_ccw_fsm.c 	ccode = hsch(sch->schid);
sch               107 drivers/s390/cio/vfio_ccw_fsm.c 		sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
sch               120 drivers/s390/cio/vfio_ccw_fsm.c 	spin_unlock_irqrestore(sch->lock, flags);
sch               126 drivers/s390/cio/vfio_ccw_fsm.c 	struct subchannel *sch;
sch               131 drivers/s390/cio/vfio_ccw_fsm.c 	sch = private->sch;
sch               133 drivers/s390/cio/vfio_ccw_fsm.c 	spin_lock_irqsave(sch->lock, flags);
sch               136 drivers/s390/cio/vfio_ccw_fsm.c 	VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
sch               139 drivers/s390/cio/vfio_ccw_fsm.c 	ccode = csch(sch->schid);
sch               148 drivers/s390/cio/vfio_ccw_fsm.c 		sch->schib.scsw.cmd.actl = SCSW_ACTL_CLEAR_PEND;
sch               158 drivers/s390/cio/vfio_ccw_fsm.c 	spin_unlock_irqrestore(sch->lock, flags);
sch               165 drivers/s390/cio/vfio_ccw_fsm.c 	struct subchannel *sch = private->sch;
sch               168 drivers/s390/cio/vfio_ccw_fsm.c 	VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
sch               174 drivers/s390/cio/vfio_ccw_fsm.c 	css_sched_sch_todo(sch, SCH_TODO_UNREG);
sch               226 drivers/s390/cio/vfio_ccw_fsm.c 	struct subchannel *sch = private->sch;
sch               232 drivers/s390/cio/vfio_ccw_fsm.c 	cio_disable_subchannel(sch);
sch               236 drivers/s390/cio/vfio_ccw_fsm.c 	return p->sch->schid;
sch               358 drivers/s390/cio/vfio_ccw_fsm.c 	VFIO_CCW_TRACE_EVENT(6, dev_name(&private->sch->dev));
sch                23 drivers/s390/cio/vfio_ccw_ops.c 	struct subchannel *sch;
sch                27 drivers/s390/cio/vfio_ccw_ops.c 	sch = private->sch;
sch                36 drivers/s390/cio/vfio_ccw_ops.c 	ret = vfio_ccw_sch_quiesce(sch);
sch                40 drivers/s390/cio/vfio_ccw_ops.c 	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
sch               128 drivers/s390/cio/vfio_ccw_ops.c 			   mdev_uuid(mdev), private->sch->schid.cssid,
sch               129 drivers/s390/cio/vfio_ccw_ops.c 			   private->sch->schid.ssid,
sch               130 drivers/s390/cio/vfio_ccw_ops.c 			   private->sch->schid.sch_no);
sch               141 drivers/s390/cio/vfio_ccw_ops.c 			   mdev_uuid(mdev), private->sch->schid.cssid,
sch               142 drivers/s390/cio/vfio_ccw_ops.c 			   private->sch->schid.ssid,
sch               143 drivers/s390/cio/vfio_ccw_ops.c 			   private->sch->schid.sch_no);
sch               147 drivers/s390/cio/vfio_ccw_ops.c 		if (!vfio_ccw_sch_quiesce(private->sch))
sch               589 drivers/s390/cio/vfio_ccw_ops.c int vfio_ccw_mdev_reg(struct subchannel *sch)
sch               591 drivers/s390/cio/vfio_ccw_ops.c 	return mdev_register_device(&sch->dev, &vfio_ccw_mdev_ops);
sch               594 drivers/s390/cio/vfio_ccw_ops.c void vfio_ccw_mdev_unreg(struct subchannel *sch)
sch               596 drivers/s390/cio/vfio_ccw_ops.c 	mdev_unregister_device(&sch->dev);
sch                79 drivers/s390/cio/vfio_ccw_private.h 	struct subchannel	*sch;
sch                99 drivers/s390/cio/vfio_ccw_private.h extern int vfio_ccw_mdev_reg(struct subchannel *sch);
sch               100 drivers/s390/cio/vfio_ccw_private.h extern void vfio_ccw_mdev_unreg(struct subchannel *sch);
sch               102 drivers/s390/cio/vfio_ccw_private.h extern int vfio_ccw_sch_quiesce(struct subchannel *sch);
sch                93 include/net/pkt_sched.h struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
sch                59 include/net/sch_generic.h 					   struct Qdisc *sch,
sch                61 include/net/sch_generic.h 	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
sch               214 include/net/sch_generic.h 	struct tcf_block *	(*tcf_block)(struct Qdisc *sch,
sch               243 include/net/sch_generic.h 					   struct Qdisc *sch,
sch               248 include/net/sch_generic.h 	int			(*init)(struct Qdisc *sch, struct nlattr *arg,
sch               252 include/net/sch_generic.h 	int			(*change)(struct Qdisc *sch,
sch               255 include/net/sch_generic.h 	void			(*attach)(struct Qdisc *sch);
sch               261 include/net/sch_generic.h 	void			(*ingress_block_set)(struct Qdisc *sch,
sch               263 include/net/sch_generic.h 	void			(*egress_block_set)(struct Qdisc *sch,
sch               265 include/net/sch_generic.h 	u32			(*ingress_block_get)(struct Qdisc *sch);
sch               266 include/net/sch_generic.h 	u32			(*egress_block_get)(struct Qdisc *sch);
sch               647 include/net/sch_generic.h void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
sch               661 include/net/sch_generic.h qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
sch               782 include/net/sch_generic.h 					   const struct Qdisc *sch)
sch               785 include/net/sch_generic.h 	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);
sch               792 include/net/sch_generic.h static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch               795 include/net/sch_generic.h 	qdisc_calculate_pkt_len(skb, sch);
sch               796 include/net/sch_generic.h 	return sch->enqueue(skb, sch, to_free);
sch               830 include/net/sch_generic.h static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
sch               833 include/net/sch_generic.h 	bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
sch               836 include/net/sch_generic.h static inline void qdisc_bstats_update(struct Qdisc *sch,
sch               839 include/net/sch_generic.h 	bstats_update(&sch->bstats, skb);
sch               842 include/net/sch_generic.h static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
sch               845 include/net/sch_generic.h 	sch->qstats.backlog -= qdisc_pkt_len(skb);
sch               848 include/net/sch_generic.h static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
sch               851 include/net/sch_generic.h 	this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
sch               854 include/net/sch_generic.h static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
sch               857 include/net/sch_generic.h 	sch->qstats.backlog += qdisc_pkt_len(skb);
sch               860 include/net/sch_generic.h static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
sch               863 include/net/sch_generic.h 	this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
sch               866 include/net/sch_generic.h static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
sch               868 include/net/sch_generic.h 	this_cpu_inc(sch->cpu_qstats->qlen);
sch               871 include/net/sch_generic.h static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
sch               873 include/net/sch_generic.h 	this_cpu_dec(sch->cpu_qstats->qlen);
sch               876 include/net/sch_generic.h static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
sch               878 include/net/sch_generic.h 	this_cpu_inc(sch->cpu_qstats->requeues);
sch               881 include/net/sch_generic.h static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
sch               883 include/net/sch_generic.h 	sch->qstats.drops += count;
sch               896 include/net/sch_generic.h static inline void qdisc_qstats_drop(struct Qdisc *sch)
sch               898 include/net/sch_generic.h 	qstats_drop_inc(&sch->qstats);
sch               901 include/net/sch_generic.h static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
sch               903 include/net/sch_generic.h 	this_cpu_inc(sch->cpu_qstats->drops);
sch               906 include/net/sch_generic.h static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
sch               908 include/net/sch_generic.h 	sch->qstats.overlimits++;
sch               911 include/net/sch_generic.h static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
sch               913 include/net/sch_generic.h 	__u32 qlen = qdisc_qlen_sum(sch);
sch               915 include/net/sch_generic.h 	return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
sch               918 include/net/sch_generic.h static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch,  __u32 *qlen,
sch               922 include/net/sch_generic.h 	__u32 len = qdisc_qlen_sum(sch);
sch               924 include/net/sch_generic.h 	__gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
sch               929 include/net/sch_generic.h static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
sch               933 include/net/sch_generic.h 	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
sch               934 include/net/sch_generic.h 	qdisc_tree_reduce_backlog(sch, qlen, backlog);
sch               937 include/net/sch_generic.h static inline void qdisc_purge_queue(struct Qdisc *sch)
sch               941 include/net/sch_generic.h 	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
sch               942 include/net/sch_generic.h 	qdisc_reset(sch);
sch               943 include/net/sch_generic.h 	qdisc_tree_reduce_backlog(sch, qlen, backlog);
sch               969 include/net/sch_generic.h static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
sch               971 include/net/sch_generic.h 	__qdisc_enqueue_tail(skb, &sch->q);
sch               972 include/net/sch_generic.h 	qdisc_qstats_backlog_inc(sch, skb);
sch              1002 include/net/sch_generic.h static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
sch              1004 include/net/sch_generic.h 	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
sch              1007 include/net/sch_generic.h 		qdisc_qstats_backlog_dec(sch, skb);
sch              1008 include/net/sch_generic.h 		qdisc_bstats_update(sch, skb);
sch              1033 include/net/sch_generic.h static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
sch              1042 include/net/sch_generic.h 		qdisc_qstats_backlog_dec(sch, skb);
sch              1050 include/net/sch_generic.h static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
sch              1053 include/net/sch_generic.h 	return __qdisc_queue_drop_head(sch, &sch->q, to_free);
sch              1056 include/net/sch_generic.h static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
sch              1058 include/net/sch_generic.h 	const struct qdisc_skb_head *qh = &sch->q;
sch              1064 include/net/sch_generic.h static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
sch              1066 include/net/sch_generic.h 	struct sk_buff *skb = skb_peek(&sch->gso_skb);
sch              1070 include/net/sch_generic.h 		skb = sch->dequeue(sch);
sch              1073 include/net/sch_generic.h 			__skb_queue_head(&sch->gso_skb, skb);
sch              1075 include/net/sch_generic.h 			qdisc_qstats_backlog_inc(sch, skb);
sch              1076 include/net/sch_generic.h 			sch->q.qlen++;
sch              1083 include/net/sch_generic.h static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
sch              1086 include/net/sch_generic.h 	if (qdisc_is_percpu_stats(sch)) {
sch              1087 include/net/sch_generic.h 		qdisc_qstats_cpu_backlog_dec(sch, skb);
sch              1088 include/net/sch_generic.h 		qdisc_bstats_cpu_update(sch, skb);
sch              1089 include/net/sch_generic.h 		qdisc_qstats_cpu_qlen_dec(sch);
sch              1091 include/net/sch_generic.h 		qdisc_qstats_backlog_dec(sch, skb);
sch              1092 include/net/sch_generic.h 		qdisc_bstats_update(sch, skb);
sch              1093 include/net/sch_generic.h 		sch->q.qlen--;
sch              1097 include/net/sch_generic.h static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
sch              1100 include/net/sch_generic.h 	if (qdisc_is_percpu_stats(sch)) {
sch              1101 include/net/sch_generic.h 		qdisc_qstats_cpu_qlen_inc(sch);
sch              1102 include/net/sch_generic.h 		this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
sch              1104 include/net/sch_generic.h 		sch->qstats.backlog += pkt_len;
sch              1105 include/net/sch_generic.h 		sch->q.qlen++;
sch              1110 include/net/sch_generic.h static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
sch              1112 include/net/sch_generic.h 	struct sk_buff *skb = skb_peek(&sch->gso_skb);
sch              1115 include/net/sch_generic.h 		skb = __skb_dequeue(&sch->gso_skb);
sch              1116 include/net/sch_generic.h 		if (qdisc_is_percpu_stats(sch)) {
sch              1117 include/net/sch_generic.h 			qdisc_qstats_cpu_backlog_dec(sch, skb);
sch              1118 include/net/sch_generic.h 			qdisc_qstats_cpu_qlen_dec(sch);
sch              1120 include/net/sch_generic.h 			qdisc_qstats_backlog_dec(sch, skb);
sch              1121 include/net/sch_generic.h 			sch->q.qlen--;
sch              1124 include/net/sch_generic.h 		skb = sch->dequeue(sch);
sch              1146 include/net/sch_generic.h static inline void qdisc_reset_queue(struct Qdisc *sch)
sch              1148 include/net/sch_generic.h 	__qdisc_reset_queue(&sch->q);
sch              1149 include/net/sch_generic.h 	sch->qstats.backlog = 0;
sch              1152 include/net/sch_generic.h static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
sch              1157 include/net/sch_generic.h 	sch_tree_lock(sch);
sch              1162 include/net/sch_generic.h 	sch_tree_unlock(sch);
sch              1167 include/net/sch_generic.h static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
sch              1170 include/net/sch_generic.h 	qdisc_qstats_drop(sch);
sch              1173 include/net/sch_generic.h static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
sch              1177 include/net/sch_generic.h 	qdisc_qstats_cpu_drop(sch);
sch              1182 include/net/sch_generic.h static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
sch              1186 include/net/sch_generic.h 	qdisc_qstats_drop(sch);
sch              1191 include/net/sch_generic.h static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
sch              1195 include/net/sch_generic.h 	qdisc_qstats_drop(sch);
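Note: the sch_generic.h inlines above are the building blocks of the classic fifo qdiscs. A toy enqueue/dequeue pair wired from only the helpers listed; bounding on sch->limit is how the real pfifo does it, but treat this as a sketch rather than the canonical implementation:

/* Toy FIFO built on the helpers above; illustration only. */
static int toy_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	if (sch->q.qlen >= sch->limit)
		return qdisc_drop(skb, sch, to_free);	/* counts the drop */
	return qdisc_enqueue_tail(skb, sch);	/* bumps backlog stats */
}

static struct sk_buff *toy_dequeue(struct Qdisc *sch)
{
	/* decrements backlog and updates bstats internally */
	return qdisc_dequeue_head(sch);
}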
sch               189 net/caif/caif_dev.c 		struct Qdisc *sch;
sch               193 net/caif/caif_dev.c 		sch = rcu_dereference_bh(txq->qdisc);
sch               194 net/caif/caif_dev.c 		if (likely(qdisc_is_empty(sch)))
sch               201 net/caif/caif_dev.c 		if (!(sch->flags & TCQ_F_NOLOCK) && likely(sch->q.qlen < high))
sch              1073 net/netfilter/ipvs/ip_vs_core.c 	struct sctp_chunkhdr *sch, schunk;
sch              1074 net/netfilter/ipvs/ip_vs_core.c 	sch = skb_header_pointer(skb, nh_len + sizeof(struct sctphdr),
sch              1076 net/netfilter/ipvs/ip_vs_core.c 	if (sch == NULL)
sch              1078 net/netfilter/ipvs/ip_vs_core.c 	if (sch->type == SCTP_CID_ABORT)
sch              1106 net/netfilter/ipvs/ip_vs_core.c 		struct sctp_chunkhdr *sch, schunk;
sch              1108 net/netfilter/ipvs/ip_vs_core.c 		sch = skb_header_pointer(skb, iph->len + sizeof(struct sctphdr),
sch              1110 net/netfilter/ipvs/ip_vs_core.c 		if (sch == NULL)
sch              1112 net/netfilter/ipvs/ip_vs_core.c 		return sch->type == SCTP_CID_INIT;
sch                22 net/netfilter/ipvs/ip_vs_proto_sctp.c 	struct sctp_chunkhdr _schunkh, *sch;
sch                29 net/netfilter/ipvs/ip_vs_proto_sctp.c 			sch = skb_header_pointer(skb, iph->len + sizeof(_sctph),
sch                31 net/netfilter/ipvs/ip_vs_proto_sctp.c 			if (sch) {
sch                32 net/netfilter/ipvs/ip_vs_proto_sctp.c 				if (sch->type == SCTP_CID_ABORT ||
sch                34 net/netfilter/ipvs/ip_vs_proto_sctp.c 				      sch->type == SCTP_CID_INIT))
sch               381 net/netfilter/ipvs/ip_vs_proto_sctp.c 	struct sctp_chunkhdr _sctpch, *sch;
sch               393 net/netfilter/ipvs/ip_vs_proto_sctp.c 	sch = skb_header_pointer(skb, cofs, sizeof(_sctpch), &_sctpch);
sch               394 net/netfilter/ipvs/ip_vs_proto_sctp.c 	if (sch == NULL)
sch               397 net/netfilter/ipvs/ip_vs_proto_sctp.c 	chunk_type = sch->type;
sch               409 net/netfilter/ipvs/ip_vs_proto_sctp.c 	if ((sch->type == SCTP_CID_COOKIE_ECHO) ||
sch               410 net/netfilter/ipvs/ip_vs_proto_sctp.c 	    (sch->type == SCTP_CID_COOKIE_ACK)) {
sch               411 net/netfilter/ipvs/ip_vs_proto_sctp.c 		int clen = ntohs(sch->length);
sch               414 net/netfilter/ipvs/ip_vs_proto_sctp.c 			sch = skb_header_pointer(skb, cofs + ALIGN(clen, 4),
sch               416 net/netfilter/ipvs/ip_vs_proto_sctp.c 			if (sch && sch->type == SCTP_CID_ABORT)
sch               417 net/netfilter/ipvs/ip_vs_proto_sctp.c 				chunk_type = sch->type;
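Note: all of the IPVS SCTP hits above rely on the same skb_header_pointer() idiom: copy the chunk header out of a possibly non-linear skb into a stack buffer and use only the returned pointer, which may be NULL. A minimal sketch (hypothetical function name, offsets as in the code above):

/* skb_header_pointer() idiom from the IPVS SCTP code above. */
static bool example_first_chunk_is_init(const struct sk_buff *skb,
					unsigned int nh_len)
{
	struct sctp_chunkhdr schunk;
	struct sctp_chunkhdr *sch;

	sch = skb_header_pointer(skb, nh_len + sizeof(struct sctphdr),
				 sizeof(schunk), &schunk);
	return sch && sch->type == SCTP_CID_INIT;
}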
sch               154 net/netfilter/nf_conntrack_proto_sctp.c #define for_each_sctp_chunk(skb, sch, _sch, offset, dataoff, count)	\
sch               157 net/netfilter/nf_conntrack_proto_sctp.c 	((sch) = skb_header_pointer((skb), (offset), sizeof(_sch), &(_sch)));	\
sch               158 net/netfilter/nf_conntrack_proto_sctp.c 	(offset) += (ntohs((sch)->length) + 3) & ~3, (count)++)
sch               167 net/netfilter/nf_conntrack_proto_sctp.c 	struct sctp_chunkhdr _sch, *sch;
sch               172 net/netfilter/nf_conntrack_proto_sctp.c 	for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
sch               173 net/netfilter/nf_conntrack_proto_sctp.c 		pr_debug("Chunk Num: %d  Type: %d\n", count, sch->type);
sch               175 net/netfilter/nf_conntrack_proto_sctp.c 		if (sch->type == SCTP_CID_INIT ||
sch               176 net/netfilter/nf_conntrack_proto_sctp.c 		    sch->type == SCTP_CID_INIT_ACK ||
sch               177 net/netfilter/nf_conntrack_proto_sctp.c 		    sch->type == SCTP_CID_SHUTDOWN_COMPLETE)
sch               185 net/netfilter/nf_conntrack_proto_sctp.c 		if (((sch->type == SCTP_CID_COOKIE_ACK ||
sch               186 net/netfilter/nf_conntrack_proto_sctp.c 		      sch->type == SCTP_CID_COOKIE_ECHO ||
sch               188 net/netfilter/nf_conntrack_proto_sctp.c 		     count != 0) || !sch->length) {
sch               194 net/netfilter/nf_conntrack_proto_sctp.c 			set_bit(sch->type, map);
sch               274 net/netfilter/nf_conntrack_proto_sctp.c 	const struct sctp_chunkhdr *sch;
sch               280 net/netfilter/nf_conntrack_proto_sctp.c 	for_each_sctp_chunk(skb, sch, _sch, offset, dataoff, count) {
sch               282 net/netfilter/nf_conntrack_proto_sctp.c 					   SCTP_CONNTRACK_NONE, sch->type);
sch               292 net/netfilter/nf_conntrack_proto_sctp.c 		if (sch->type == SCTP_CID_INIT) {
sch               307 net/netfilter/nf_conntrack_proto_sctp.c 		} else if (sch->type == SCTP_CID_HEARTBEAT) {
sch               367 net/netfilter/nf_conntrack_proto_sctp.c 	const struct sctp_chunkhdr *sch;
sch               409 net/netfilter/nf_conntrack_proto_sctp.c 	for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
sch               411 net/netfilter/nf_conntrack_proto_sctp.c 		if (sch->type == SCTP_CID_INIT) {
sch               415 net/netfilter/nf_conntrack_proto_sctp.c 		} else if (sch->type == SCTP_CID_ABORT) {
sch               420 net/netfilter/nf_conntrack_proto_sctp.c 		} else if (sch->type == SCTP_CID_SHUTDOWN_COMPLETE) {
sch               424 net/netfilter/nf_conntrack_proto_sctp.c 			    sch->flags & SCTP_CHUNK_FLAG_T)
sch               426 net/netfilter/nf_conntrack_proto_sctp.c 		} else if (sch->type == SCTP_CID_COOKIE_ECHO) {
sch               430 net/netfilter/nf_conntrack_proto_sctp.c 		} else if (sch->type == SCTP_CID_HEARTBEAT ||
sch               431 net/netfilter/nf_conntrack_proto_sctp.c 			   sch->type == SCTP_CID_HEARTBEAT_ACK) {
sch               443 net/netfilter/nf_conntrack_proto_sctp.c 		new_state = sctp_new_state(dir, old_state, sch->type);
sch               449 net/netfilter/nf_conntrack_proto_sctp.c 				 dir, sch->type, old_state);
sch               454 net/netfilter/nf_conntrack_proto_sctp.c 		if (sch->type == SCTP_CID_INIT ||
sch               455 net/netfilter/nf_conntrack_proto_sctp.c 		    sch->type == SCTP_CID_INIT_ACK) {
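Note: the for_each_sctp_chunk() macro quoted above (its offset/count initialisation falls in lines this index elides) drives every chunk loop in the conntrack hits. A usage sketch, under the assumption that dataoff points at the SCTP header:

/* Walking chunks with for_each_sctp_chunk(); illustration only. */
static bool example_has_abort(const struct sk_buff *skb, unsigned int dataoff)
{
	struct sctp_chunkhdr _sch;
	const struct sctp_chunkhdr *sch;
	unsigned int offset, count;

	for_each_sctp_chunk(skb, sch, _sch, offset, dataoff, count) {
		if (sch->type == SCTP_CID_ABORT)
			return true;	/* chunk walk ends early */
	}
	return false;
}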
sch                46 net/netfilter/xt_sctp.c 	const struct sctp_chunkhdr *sch;
sch                60 net/netfilter/xt_sctp.c 		sch = skb_header_pointer(skb, offset, sizeof(_sch), &_sch);
sch                61 net/netfilter/xt_sctp.c 		if (sch == NULL || sch->length == 0) {
sch                69 net/netfilter/xt_sctp.c 			 ++i, offset, sch->type, htons(sch->length),
sch                70 net/netfilter/xt_sctp.c 			 sch->flags);
sch                72 net/netfilter/xt_sctp.c 		offset += SCTP_PAD4(ntohs(sch->length));
sch                76 net/netfilter/xt_sctp.c 		if (SCTP_CHUNKMAP_IS_SET(info->chunkmap, sch->type)) {
sch                80 net/netfilter/xt_sctp.c 					sch->type, sch->flags)) {
sch                87 net/netfilter/xt_sctp.c 				    sch->type, sch->flags))
sch                88 net/netfilter/xt_sctp.c 					SCTP_CHUNKMAP_CLEAR(chunkmapcopy, sch->type);
sch                93 net/netfilter/xt_sctp.c 				    sch->type, sch->flags))
sch               657 net/sched/sch_api.c void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
sch               677 net/sched/sch_api.c 	sch_tree_lock(sch);
sch               687 net/sched/sch_api.c 	sch_tree_unlock(sch);
sch               753 net/sched/sch_api.c void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
sch               755 net/sched/sch_api.c 	bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
sch               766 net/sched/sch_api.c 	while ((parentid = sch->parent)) {
sch               770 net/sched/sch_api.c 		if (sch->flags & TCQ_F_NOPARENT)
sch               781 net/sched/sch_api.c 		notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
sch               784 net/sched/sch_api.c 		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
sch               785 net/sched/sch_api.c 		if (sch == NULL) {
sch               789 net/sched/sch_api.c 		cops = sch->ops->cl_ops;
sch               791 net/sched/sch_api.c 			cl = cops->find(sch, parentid);
sch               792 net/sched/sch_api.c 			cops->qlen_notify(sch, cl);
sch               794 net/sched/sch_api.c 		sch->q.qlen -= n;
sch               795 net/sched/sch_api.c 		sch->qstats.backlog -= len;
sch               796 net/sched/sch_api.c 		__qdisc_qstats_drop(sch, drops);
sch               802 net/sched/sch_api.c int qdisc_offload_dump_helper(struct Qdisc *sch, enum tc_setup_type type,
sch               805 net/sched/sch_api.c 	struct net_device *dev = qdisc_dev(sch);
sch               808 net/sched/sch_api.c 	sch->flags &= ~TCQ_F_OFFLOADED;
sch               817 net/sched/sch_api.c 		sch->flags |= TCQ_F_OFFLOADED;
sch               823 net/sched/sch_api.c void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
sch               844 net/sched/sch_api.c 	any_qdisc_is_offloaded |= sch && sch->flags & TCQ_F_OFFLOADED;
sch               998 net/sched/sch_api.c static void qdisc_clear_nolock(struct Qdisc *sch)
sch              1000 net/sched/sch_api.c 	sch->flags &= ~TCQ_F_NOLOCK;
sch              1001 net/sched/sch_api.c 	if (!(sch->flags & TCQ_F_CPUSTATS))
sch              1004 net/sched/sch_api.c 	free_percpu(sch->cpu_bstats);
sch              1005 net/sched/sch_api.c 	free_percpu(sch->cpu_qstats);
sch              1006 net/sched/sch_api.c 	sch->cpu_bstats = NULL;
sch              1007 net/sched/sch_api.c 	sch->cpu_qstats = NULL;
sch              1008 net/sched/sch_api.c 	sch->flags &= ~TCQ_F_CPUSTATS;
sch              1108 net/sched/sch_api.c static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
sch              1120 net/sched/sch_api.c 		if (!sch->ops->ingress_block_set) {
sch              1124 net/sched/sch_api.c 		sch->ops->ingress_block_set(sch, block_index);
sch              1133 net/sched/sch_api.c 		if (!sch->ops->egress_block_set) {
sch              1137 net/sched/sch_api.c 		sch->ops->egress_block_set(sch, block_index);
sch              1156 net/sched/sch_api.c 	struct Qdisc *sch;
sch              1195 net/sched/sch_api.c 	sch = qdisc_alloc(dev_queue, ops, extack);
sch              1196 net/sched/sch_api.c 	if (IS_ERR(sch)) {
sch              1197 net/sched/sch_api.c 		err = PTR_ERR(sch);
sch              1201 net/sched/sch_api.c 	sch->parent = parent;
sch              1204 net/sched/sch_api.c 		sch->flags |= TCQ_F_INGRESS;
sch              1216 net/sched/sch_api.c 			sch->flags |= TCQ_F_ONETXQUEUE;
sch              1219 net/sched/sch_api.c 	sch->handle = handle;
sch              1232 net/sched/sch_api.c 	err = qdisc_block_indexes_set(sch, tca, extack);
sch              1237 net/sched/sch_api.c 		err = ops->init(sch, tca[TCA_OPTIONS], extack);
sch              1248 net/sched/sch_api.c 		rcu_assign_pointer(sch->stab, stab);
sch              1254 net/sched/sch_api.c 		if (sch->flags & TCQ_F_MQROOT) {
sch              1259 net/sched/sch_api.c 		if (sch->parent != TC_H_ROOT &&
sch              1260 net/sched/sch_api.c 		    !(sch->flags & TCQ_F_INGRESS) &&
sch              1262 net/sched/sch_api.c 			running = qdisc_root_sleeping_running(sch);
sch              1264 net/sched/sch_api.c 			running = &sch->running;
sch              1266 net/sched/sch_api.c 		err = gen_new_estimator(&sch->bstats,
sch              1267 net/sched/sch_api.c 					sch->cpu_bstats,
sch              1268 net/sched/sch_api.c 					&sch->rate_est,
sch              1278 net/sched/sch_api.c 	qdisc_hash_add(sch, false);
sch              1280 net/sched/sch_api.c 	return sch;
sch              1285 net/sched/sch_api.c 		ops->destroy(sch);
sch              1288 net/sched/sch_api.c 	qdisc_free(sch);
sch              1300 net/sched/sch_api.c 	qdisc_put_stab(rtnl_dereference(sch->stab));
sch              1302 net/sched/sch_api.c 		ops->destroy(sch);
sch              1306 net/sched/sch_api.c static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
sch              1313 net/sched/sch_api.c 		if (!sch->ops->change) {
sch              1321 net/sched/sch_api.c 		err = sch->ops->change(sch, tca[TCA_OPTIONS], extack);
sch              1332 net/sched/sch_api.c 	ostab = rtnl_dereference(sch->stab);
sch              1333 net/sched/sch_api.c 	rcu_assign_pointer(sch->stab, stab);
sch              1339 net/sched/sch_api.c 		if (sch->flags & TCQ_F_MQROOT)
sch              1341 net/sched/sch_api.c 		gen_replace_estimator(&sch->bstats,
sch              1342 net/sched/sch_api.c 				      sch->cpu_bstats,
sch              1343 net/sched/sch_api.c 				      &sch->rate_est,
sch              1345 net/sched/sch_api.c 				      qdisc_root_sleeping_running(sch),
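
The sch_api.c cluster around qdisc_tree_reduce_backlog() is the generic "propagate queue-length changes upward" walk: resolve each ancestor from sch->parent via handle lookup, give the parent class a qlen_notify() when the child drained, and subtract n packets / len bytes at every level so tc statistics stay consistent. A toy model using direct parent pointers instead of the kernel's handle lookup:

    #include <stdio.h>

    struct qdisc {
        const char   *name;
        struct qdisc *parent;      /* the kernel resolves this by handle */
        unsigned int  qlen;        /* packets */
        unsigned int  backlog;     /* bytes   */
    };

    /* Toy qdisc_tree_reduce_backlog(): subtract n packets and len bytes
     * from every ancestor of sch (the qdisc itself already adjusted its
     * own counters before calling this). */
    static void tree_reduce_backlog(struct qdisc *sch, unsigned n, unsigned len)
    {
        while ((sch = sch->parent) != NULL) {
            sch->qlen    -= n;
            sch->backlog -= len;
            printf("%-5s qlen=%u backlog=%u\n",
                   sch->name, sch->qlen, sch->backlog);
        }
    }

    int main(void)
    {
        struct qdisc root  = { "root",  NULL,   10, 15000 };
        struct qdisc inner = { "inner", &root,   4,  6000 };
        struct qdisc leaf  = { "leaf",  &inner,  2,  3000 };

        /* leaf internally dropped one 1500-byte packet: */
        tree_reduce_backlog(&leaf, 1, 1500);
        return 0;
    }
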
sch                73 net/sched/sch_atm.c static inline struct atm_flow_data *lookup_flow(struct Qdisc *sch, u32 classid)
sch                75 net/sched/sch_atm.c 	struct atm_qdisc_data *p = qdisc_priv(sch);
sch                85 net/sched/sch_atm.c static int atm_tc_graft(struct Qdisc *sch, unsigned long arg,
sch                89 net/sched/sch_atm.c 	struct atm_qdisc_data *p = qdisc_priv(sch);
sch                93 net/sched/sch_atm.c 		sch, p, flow, new, old);
sch               105 net/sched/sch_atm.c static struct Qdisc *atm_tc_leaf(struct Qdisc *sch, unsigned long cl)
sch               109 net/sched/sch_atm.c 	pr_debug("atm_tc_leaf(sch %p,flow %p)\n", sch, flow);
sch               113 net/sched/sch_atm.c static unsigned long atm_tc_find(struct Qdisc *sch, u32 classid)
sch               115 net/sched/sch_atm.c 	struct atm_qdisc_data *p __maybe_unused = qdisc_priv(sch);
sch               118 net/sched/sch_atm.c 	pr_debug("%s(sch %p,[qdisc %p],classid %x)\n", __func__, sch, p, classid);
sch               119 net/sched/sch_atm.c 	flow = lookup_flow(sch, classid);
sch               124 net/sched/sch_atm.c static unsigned long atm_tc_bind_filter(struct Qdisc *sch,
sch               127 net/sched/sch_atm.c 	struct atm_qdisc_data *p __maybe_unused = qdisc_priv(sch);
sch               130 net/sched/sch_atm.c 	pr_debug("%s(sch %p,[qdisc %p],classid %x)\n", __func__, sch, p, classid);
sch               131 net/sched/sch_atm.c 	flow = lookup_flow(sch, classid);
sch               143 net/sched/sch_atm.c static void atm_tc_put(struct Qdisc *sch, unsigned long cl)
sch               145 net/sched/sch_atm.c 	struct atm_qdisc_data *p = qdisc_priv(sch);
sch               148 net/sched/sch_atm.c 	pr_debug("atm_tc_put(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
sch               163 net/sched/sch_atm.c 		atm_tc_put(sch, (unsigned long)flow->excess);
sch               195 net/sched/sch_atm.c static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
sch               199 net/sched/sch_atm.c 	struct atm_qdisc_data *p = qdisc_priv(sch);
sch               209 net/sched/sch_atm.c 		"flow %p,opt %p)\n", sch, p, classid, parent, flow, opt);
sch               213 net/sched/sch_atm.c 	if (parent && parent != TC_H_ROOT && parent != sch->handle)
sch               247 net/sched/sch_atm.c 			atm_tc_find(sch, nla_get_u32(tb[TCA_ATM_EXCESS]));
sch               264 net/sched/sch_atm.c 		if (TC_H_MAJ(classid ^ sch->handle)) {
sch               274 net/sched/sch_atm.c 			classid = TC_H_MAKE(sch->handle, 0x8000 | i);
sch               275 net/sched/sch_atm.c 			cl = atm_tc_find(sch, classid);
sch               288 net/sched/sch_atm.c 	error = tcf_block_get(&flow->block, &flow->filter_list, sch,
sch               295 net/sched/sch_atm.c 	flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
sch               323 net/sched/sch_atm.c static int atm_tc_delete(struct Qdisc *sch, unsigned long arg)
sch               325 net/sched/sch_atm.c 	struct atm_qdisc_data *p = qdisc_priv(sch);
sch               328 net/sched/sch_atm.c 	pr_debug("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
sch               343 net/sched/sch_atm.c 	atm_tc_put(sch, arg);
sch               347 net/sched/sch_atm.c static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker)
sch               349 net/sched/sch_atm.c 	struct atm_qdisc_data *p = qdisc_priv(sch);
sch               352 net/sched/sch_atm.c 	pr_debug("atm_tc_walk(sch %p,[qdisc %p],walker %p)\n", sch, p, walker);
sch               357 net/sched/sch_atm.c 		    walker->fn(sch, (unsigned long)flow, walker) < 0) {
sch               365 net/sched/sch_atm.c static struct tcf_block *atm_tc_tcf_block(struct Qdisc *sch, unsigned long cl,
sch               368 net/sched/sch_atm.c 	struct atm_qdisc_data *p = qdisc_priv(sch);
sch               371 net/sched/sch_atm.c 	pr_debug("atm_tc_find_tcf(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
sch               377 net/sched/sch_atm.c static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch               380 net/sched/sch_atm.c 	struct atm_qdisc_data *p = qdisc_priv(sch);
sch               386 net/sched/sch_atm.c 	pr_debug("atm_tc_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
sch               389 net/sched/sch_atm.c 	if (TC_H_MAJ(skb->priority) != sch->handle ||
sch               390 net/sched/sch_atm.c 	    !(flow = (struct atm_flow_data *)atm_tc_find(sch, skb->priority))) {
sch               401 net/sched/sch_atm.c 					flow = lookup_flow(sch, res.classid);
sch               439 net/sched/sch_atm.c 			qdisc_qstats_drop(sch);
sch               455 net/sched/sch_atm.c 		sch->q.qlen++;
sch               471 net/sched/sch_atm.c 	struct Qdisc *sch = (struct Qdisc *)data;
sch               472 net/sched/sch_atm.c 	struct atm_qdisc_data *p = qdisc_priv(sch);
sch               476 net/sched/sch_atm.c 	pr_debug("sch_atm_dequeue(sch %p,[qdisc %p])\n", sch, p);
sch               492 net/sched/sch_atm.c 			qdisc_bstats_update(sch, skb);
sch               519 net/sched/sch_atm.c static struct sk_buff *atm_tc_dequeue(struct Qdisc *sch)
sch               521 net/sched/sch_atm.c 	struct atm_qdisc_data *p = qdisc_priv(sch);
sch               524 net/sched/sch_atm.c 	pr_debug("atm_tc_dequeue(sch %p,[qdisc %p])\n", sch, p);
sch               528 net/sched/sch_atm.c 		sch->q.qlen--;
sch               532 net/sched/sch_atm.c static struct sk_buff *atm_tc_peek(struct Qdisc *sch)
sch               534 net/sched/sch_atm.c 	struct atm_qdisc_data *p = qdisc_priv(sch);
sch               536 net/sched/sch_atm.c 	pr_debug("atm_tc_peek(sch %p,[qdisc %p])\n", sch, p);
sch               541 net/sched/sch_atm.c static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt,
sch               544 net/sched/sch_atm.c 	struct atm_qdisc_data *p = qdisc_priv(sch);
sch               547 net/sched/sch_atm.c 	pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
sch               551 net/sched/sch_atm.c 	p->link.q = qdisc_create_dflt(sch->dev_queue,
sch               552 net/sched/sch_atm.c 				      &pfifo_qdisc_ops, sch->handle, extack);
sch               557 net/sched/sch_atm.c 	err = tcf_block_get(&p->link.block, &p->link.filter_list, sch,
sch               564 net/sched/sch_atm.c 	p->link.common.classid = sch->handle;
sch               566 net/sched/sch_atm.c 	tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch);
sch               570 net/sched/sch_atm.c static void atm_tc_reset(struct Qdisc *sch)
sch               572 net/sched/sch_atm.c 	struct atm_qdisc_data *p = qdisc_priv(sch);
sch               575 net/sched/sch_atm.c 	pr_debug("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p);
sch               578 net/sched/sch_atm.c 	sch->q.qlen = 0;
sch               581 net/sched/sch_atm.c static void atm_tc_destroy(struct Qdisc *sch)
sch               583 net/sched/sch_atm.c 	struct atm_qdisc_data *p = qdisc_priv(sch);
sch               586 net/sched/sch_atm.c 	pr_debug("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p);
sch               595 net/sched/sch_atm.c 		atm_tc_put(sch, (unsigned long)flow);
sch               600 net/sched/sch_atm.c static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
sch               603 net/sched/sch_atm.c 	struct atm_qdisc_data *p = qdisc_priv(sch);
sch               608 net/sched/sch_atm.c 		sch, p, flow, skb, tcm);
sch               649 net/sched/sch_atm.c atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
sch               654 net/sched/sch_atm.c 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
sch               662 net/sched/sch_atm.c static int atm_tc_dump(struct Qdisc *sch, struct sk_buff *skb)
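
The sch_atm.c hits revolve around per-flow subqueues: lookup_flow() resolves a tc classid to its atm_flow_data, and atm_tc_enqueue() first tries skb->priority as a classid before falling back to the filter chain. A minimal model of the classid lookup over the qdisc's flow list (flow layout reduced to what the lookup needs):

    #include <stdint.h>
    #include <stdio.h>

    struct atm_flow {
        uint32_t         classid;
        struct atm_flow *next;
        const char      *label;
    };

    /* Same idea as lookup_flow() in sch_atm.c: a linear scan of the
     * qdisc's flow list for a matching classid. */
    static struct atm_flow *lookup_flow(struct atm_flow *list, uint32_t classid)
    {
        for (struct atm_flow *f = list; f; f = f->next)
            if (f->classid == classid)
                return f;
        return NULL;
    }

    int main(void)
    {
        struct atm_flow vc2 = { 0x10002, NULL, "vc-2" };
        struct atm_flow vc1 = { 0x10001, &vc2, "vc-1" };
        struct atm_flow *f  = lookup_flow(&vc1, 0x10002);

        printf("classid 0x10002 -> %s\n", f ? f->label : "(none)");
        return 0;
    }
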
sch                16 net/sched/sch_blackhole.c static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch                19 net/sched/sch_blackhole.c 	qdisc_drop(skb, sch, to_free);
sch                23 net/sched/sch_blackhole.c static struct sk_buff *blackhole_dequeue(struct Qdisc *sch)
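
sch_blackhole.c is the degenerate Qdisc_ops pair and a handy reference point for the enqueue/dequeue signatures recurring throughout this listing: enqueue drops every packet via qdisc_drop() (returning NET_XMIT_SUCCESS | __NET_XMIT_BYPASS), and dequeue always returns NULL. As a plain-C sketch:

    #include <stdio.h>
    #include <stddef.h>

    struct pkt { int id; };

    /* Blackhole semantics: every enqueue drops, dequeue never yields. */
    static int blackhole_enqueue(struct pkt *p)
    {
        (void)p;        /* kernel: qdisc_drop(skb, sch, to_free) */
        return 0;
    }

    static struct pkt *blackhole_dequeue(void)
    {
        return NULL;
    }

    int main(void)
    {
        struct pkt p = { 1 };

        blackhole_enqueue(&p);
        printf("dequeue -> %p\n", (void *)blackhole_dequeue());
        return 0;
    }
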
sch              1461 net/sched/sch_cake.c static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
sch              1463 net/sched/sch_cake.c 	struct cake_sched_data *q = qdisc_priv(sch);
sch              1500 net/sched/sch_cake.c 	sch->qstats.backlog -= len;
sch              1501 net/sched/sch_cake.c 	qdisc_tree_reduce_backlog(sch, 1, len);
sch              1505 net/sched/sch_cake.c 	sch->qstats.drops++;
sch              1511 net/sched/sch_cake.c 	sch->q.qlen--;
sch              1555 net/sched/sch_cake.c static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
sch              1558 net/sched/sch_cake.c 	struct cake_sched_data *q = qdisc_priv(sch);
sch              1575 net/sched/sch_cake.c 	else if (TC_H_MAJ(skb->priority) == sch->handle &&
sch              1590 net/sched/sch_cake.c static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t,
sch              1593 net/sched/sch_cake.c 	struct cake_sched_data *q = qdisc_priv(sch);
sch              1624 net/sched/sch_cake.c 	*t = cake_select_tin(sch, skb);
sch              1628 net/sched/sch_cake.c static void cake_reconfigure(struct Qdisc *sch);
sch              1630 net/sched/sch_cake.c static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch              1633 net/sched/sch_cake.c 	struct cake_sched_data *q = qdisc_priv(sch);
sch              1643 net/sched/sch_cake.c 	idx = cake_classify(sch, &b, skb, q->flow_mode, &ret);
sch              1646 net/sched/sch_cake.c 			qdisc_qstats_drop(sch);
sch              1658 net/sched/sch_cake.c 		if (!sch->q.qlen) {
sch              1668 net/sched/sch_cake.c 				sch->qstats.overlimits++;
sch              1684 net/sched/sch_cake.c 			return qdisc_drop(skb, sch, to_free);
sch              1695 net/sched/sch_cake.c 			sch->q.qlen++;
sch              1707 net/sched/sch_cake.c 		sch->qstats.backlog += slen;
sch              1710 net/sched/sch_cake.c 		qdisc_tree_reduce_backlog(sch, 1-numsegs, len-slen);
sch              1723 net/sched/sch_cake.c 			sch->qstats.drops++;
sch              1730 net/sched/sch_cake.c 			qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
sch              1733 net/sched/sch_cake.c 			sch->q.qlen++;
sch              1742 net/sched/sch_cake.c 		sch->qstats.backlog += len;
sch              1783 net/sched/sch_cake.c 				cake_reconfigure(sch);
sch              1841 net/sched/sch_cake.c 			cake_drop(sch, to_free);
sch              1848 net/sched/sch_cake.c static struct sk_buff *cake_dequeue_one(struct Qdisc *sch)
sch              1850 net/sched/sch_cake.c 	struct cake_sched_data *q = qdisc_priv(sch);
sch              1861 net/sched/sch_cake.c 		sch->qstats.backlog      -= len;
sch              1863 net/sched/sch_cake.c 		sch->q.qlen--;
sch              1872 net/sched/sch_cake.c static void cake_clear_tin(struct Qdisc *sch, u16 tin)
sch              1874 net/sched/sch_cake.c 	struct cake_sched_data *q = qdisc_priv(sch);
sch              1879 net/sched/sch_cake.c 		while (!!(skb = cake_dequeue_one(sch)))
sch              1883 net/sched/sch_cake.c static struct sk_buff *cake_dequeue(struct Qdisc *sch)
sch              1885 net/sched/sch_cake.c 	struct cake_sched_data *q = qdisc_priv(sch);
sch              1898 net/sched/sch_cake.c 	if (!sch->q.qlen)
sch              1907 net/sched/sch_cake.c 		sch->qstats.overlimits++;
sch              2045 net/sched/sch_cake.c 		skb = cake_dequeue_one(sch);
sch              2114 net/sched/sch_cake.c 		qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
sch              2115 net/sched/sch_cake.c 		qdisc_qstats_drop(sch);
sch              2122 net/sched/sch_cake.c 	qdisc_bstats_update(sch, skb);
sch              2136 net/sched/sch_cake.c 	if (ktime_after(q->time_next_packet, now) && sch->q.qlen) {
sch              2141 net/sched/sch_cake.c 	} else if (!sch->q.qlen) {
sch              2163 net/sched/sch_cake.c static void cake_reset(struct Qdisc *sch)
sch              2168 net/sched/sch_cake.c 		cake_clear_tin(sch, c);
sch              2230 net/sched/sch_cake.c static int cake_config_besteffort(struct Qdisc *sch)
sch              2232 net/sched/sch_cake.c 	struct cake_sched_data *q = qdisc_priv(sch);
sch              2234 net/sched/sch_cake.c 	u32 mtu = psched_mtu(qdisc_dev(sch));
sch              2250 net/sched/sch_cake.c static int cake_config_precedence(struct Qdisc *sch)
sch              2253 net/sched/sch_cake.c 	struct cake_sched_data *q = qdisc_priv(sch);
sch              2254 net/sched/sch_cake.c 	u32 mtu = psched_mtu(qdisc_dev(sch));
sch              2331 net/sched/sch_cake.c static int cake_config_diffserv8(struct Qdisc *sch)
sch              2347 net/sched/sch_cake.c 	struct cake_sched_data *q = qdisc_priv(sch);
sch              2348 net/sched/sch_cake.c 	u32 mtu = psched_mtu(qdisc_dev(sch));
sch              2384 net/sched/sch_cake.c static int cake_config_diffserv4(struct Qdisc *sch)
sch              2396 net/sched/sch_cake.c 	struct cake_sched_data *q = qdisc_priv(sch);
sch              2397 net/sched/sch_cake.c 	u32 mtu = psched_mtu(qdisc_dev(sch));
sch              2432 net/sched/sch_cake.c static int cake_config_diffserv3(struct Qdisc *sch)
sch              2439 net/sched/sch_cake.c 	struct cake_sched_data *q = qdisc_priv(sch);
sch              2440 net/sched/sch_cake.c 	u32 mtu = psched_mtu(qdisc_dev(sch));
sch              2471 net/sched/sch_cake.c static void cake_reconfigure(struct Qdisc *sch)
sch              2473 net/sched/sch_cake.c 	struct cake_sched_data *q = qdisc_priv(sch);
sch              2478 net/sched/sch_cake.c 		ft = cake_config_besteffort(sch);
sch              2482 net/sched/sch_cake.c 		ft = cake_config_precedence(sch);
sch              2486 net/sched/sch_cake.c 		ft = cake_config_diffserv8(sch);
sch              2490 net/sched/sch_cake.c 		ft = cake_config_diffserv4(sch);
sch              2495 net/sched/sch_cake.c 		ft = cake_config_diffserv3(sch);
sch              2500 net/sched/sch_cake.c 		cake_clear_tin(sch, c);
sch              2518 net/sched/sch_cake.c 	sch->flags &= ~TCQ_F_CAN_BYPASS;
sch              2521 net/sched/sch_cake.c 			      max(sch->limit * psched_mtu(qdisc_dev(sch)),
sch              2525 net/sched/sch_cake.c static int cake_change(struct Qdisc *sch, struct nlattr *opt,
sch              2528 net/sched/sch_cake.c 	struct cake_sched_data *q = qdisc_priv(sch);
sch              2642 net/sched/sch_cake.c 		sch_tree_lock(sch);
sch              2643 net/sched/sch_cake.c 		cake_reconfigure(sch);
sch              2644 net/sched/sch_cake.c 		sch_tree_unlock(sch);
sch              2650 net/sched/sch_cake.c static void cake_destroy(struct Qdisc *sch)
sch              2652 net/sched/sch_cake.c 	struct cake_sched_data *q = qdisc_priv(sch);
sch              2659 net/sched/sch_cake.c static int cake_init(struct Qdisc *sch, struct nlattr *opt,
sch              2662 net/sched/sch_cake.c 	struct cake_sched_data *q = qdisc_priv(sch);
sch              2665 net/sched/sch_cake.c 	sch->limit = 10240;
sch              2679 net/sched/sch_cake.c 	qdisc_watchdog_init(&q->watchdog, sch);
sch              2682 net/sched/sch_cake.c 		int err = cake_change(sch, opt, extack);
sch              2688 net/sched/sch_cake.c 	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
sch              2724 net/sched/sch_cake.c 	cake_reconfigure(sch);
sch              2731 net/sched/sch_cake.c 	cake_destroy(sch);
sch              2735 net/sched/sch_cake.c static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
sch              2737 net/sched/sch_cake.c 	struct cake_sched_data *q = qdisc_priv(sch);
sch              2809 net/sched/sch_cake.c static int cake_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
sch              2812 net/sched/sch_cake.c 	struct cake_sched_data *q = qdisc_priv(sch);
sch              2908 net/sched/sch_cake.c static struct Qdisc *cake_leaf(struct Qdisc *sch, unsigned long arg)
sch              2913 net/sched/sch_cake.c static unsigned long cake_find(struct Qdisc *sch, u32 classid)
sch              2918 net/sched/sch_cake.c static unsigned long cake_bind(struct Qdisc *sch, unsigned long parent,
sch              2928 net/sched/sch_cake.c static struct tcf_block *cake_tcf_block(struct Qdisc *sch, unsigned long cl,
sch              2931 net/sched/sch_cake.c 	struct cake_sched_data *q = qdisc_priv(sch);
sch              2938 net/sched/sch_cake.c static int cake_dump_class(struct Qdisc *sch, unsigned long cl,
sch              2945 net/sched/sch_cake.c static int cake_dump_class_stats(struct Qdisc *sch, unsigned long cl,
sch              2948 net/sched/sch_cake.c 	struct cake_sched_data *q = qdisc_priv(sch);
sch              2962 net/sched/sch_cake.c 			sch_tree_lock(sch);
sch              2968 net/sched/sch_cake.c 			sch_tree_unlock(sch);
sch              3019 net/sched/sch_cake.c static void cake_walk(struct Qdisc *sch, struct qdisc_walker *arg)
sch              3021 net/sched/sch_cake.c 	struct cake_sched_data *q = qdisc_priv(sch);
sch              3036 net/sched/sch_cake.c 			if (arg->fn(sch, i * CAKE_QUEUES + j + 1, arg) < 0) {
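
A subtle detail in the sch_cake.c hits is the GSO-split accounting in cake_enqueue(): when one oversized skb is segmented into numsegs packets totalling slen bytes, the ancestors had already accounted 1 packet / len bytes, so cake calls qdisc_tree_reduce_backlog(sch, 1 - numsegs, len - slen) with negative deltas to make them gain the difference. Worked numbers (the sizes below are arbitrary examples):

    #include <stdio.h>

    /* cake_enqueue() GSO-split accounting: ancestors counted the
     * unsegmented skb as 1 packet / len bytes; after segmentation the
     * real cost is numsegs packets / slen bytes, so the tree must gain
     * (numsegs - 1) packets and (slen - len) bytes.  The kernel states
     * that as qdisc_tree_reduce_backlog(sch, 1 - numsegs, len - slen). */
    int main(void)
    {
        int len = 64000;               /* original GSO skb   */
        int numsegs = 45;              /* segments produced  */
        int slen = 45 * 1448;          /* their total bytes  */

        int dpkts  = 1 - numsegs;      /* -44: "reduce by -44" adds 44 */
        int dbytes = len - slen;       /* 64000 - 65160 = -1160        */

        printf("reduce(n=%d, len=%d) => ancestors gain %d pkts, %d bytes\n",
               dpkts, dbytes, -dpkts, -dbytes);
        return 0;
    }
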
sch               205 net/sched/sch_cbq.c cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
sch               207 net/sched/sch_cbq.c 	struct cbq_sched_data *q = qdisc_priv(sch);
sch               218 net/sched/sch_cbq.c 	if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
sch               359 net/sched/sch_cbq.c cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch               362 net/sched/sch_cbq.c 	struct cbq_sched_data *q = qdisc_priv(sch);
sch               364 net/sched/sch_cbq.c 	struct cbq_class *cl = cbq_classify(skb, sch, &ret);
sch               371 net/sched/sch_cbq.c 			qdisc_qstats_drop(sch);
sch               378 net/sched/sch_cbq.c 		sch->q.qlen++;
sch               386 net/sched/sch_cbq.c 		qdisc_qstats_drop(sch);
sch               483 net/sched/sch_cbq.c 	struct Qdisc *sch = q->watchdog.qdisc;
sch               515 net/sched/sch_cbq.c 	__netif_schedule(qdisc_root(sch));
sch               679 net/sched/sch_cbq.c cbq_dequeue_prio(struct Qdisc *sch, int prio)
sch               681 net/sched/sch_cbq.c 	struct cbq_sched_data *q = qdisc_priv(sch);
sch               782 net/sched/sch_cbq.c cbq_dequeue_1(struct Qdisc *sch)
sch               784 net/sched/sch_cbq.c 	struct cbq_sched_data *q = qdisc_priv(sch);
sch               792 net/sched/sch_cbq.c 		skb = cbq_dequeue_prio(sch, prio);
sch               800 net/sched/sch_cbq.c cbq_dequeue(struct Qdisc *sch)
sch               803 net/sched/sch_cbq.c 	struct cbq_sched_data *q = qdisc_priv(sch);
sch               816 net/sched/sch_cbq.c 		skb = cbq_dequeue_1(sch);
sch               818 net/sched/sch_cbq.c 			qdisc_bstats_update(sch, skb);
sch               819 net/sched/sch_cbq.c 			sch->q.qlen--;
sch               853 net/sched/sch_cbq.c 	if (sch->q.qlen) {
sch               854 net/sched/sch_cbq.c 		qdisc_qstats_overlimit(sch);
sch              1026 net/sched/sch_cbq.c cbq_reset(struct Qdisc *sch)
sch              1028 net/sched/sch_cbq.c 	struct cbq_sched_data *q = qdisc_priv(sch);
sch              1056 net/sched/sch_cbq.c 	sch->q.qlen = 0;
sch              1157 net/sched/sch_cbq.c static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
sch              1160 net/sched/sch_cbq.c 	struct cbq_sched_data *q = qdisc_priv(sch);
sch              1165 net/sched/sch_cbq.c 	qdisc_watchdog_init(&q->watchdog, sch);
sch              1184 net/sched/sch_cbq.c 	err = tcf_block_get(&q->link.block, &q->link.filter_list, sch, extack);
sch              1193 net/sched/sch_cbq.c 	q->link.common.classid = sch->handle;
sch              1194 net/sched/sch_cbq.c 	q->link.qdisc = sch;
sch              1195 net/sched/sch_cbq.c 	q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
sch              1196 net/sched/sch_cbq.c 				      sch->handle, NULL);
sch              1205 net/sched/sch_cbq.c 	q->link.allot = psched_mtu(qdisc_dev(sch));
sch              1320 net/sched/sch_cbq.c static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
sch              1322 net/sched/sch_cbq.c 	struct cbq_sched_data *q = qdisc_priv(sch);
sch              1338 net/sched/sch_cbq.c cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
sch              1340 net/sched/sch_cbq.c 	struct cbq_sched_data *q = qdisc_priv(sch);
sch              1347 net/sched/sch_cbq.c cbq_dump_class(struct Qdisc *sch, unsigned long arg,
sch              1373 net/sched/sch_cbq.c cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
sch              1376 net/sched/sch_cbq.c 	struct cbq_sched_data *q = qdisc_priv(sch);
sch              1387 net/sched/sch_cbq.c 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
sch              1396 net/sched/sch_cbq.c static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
sch              1402 net/sched/sch_cbq.c 		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
sch              1408 net/sched/sch_cbq.c 	*old = qdisc_replace(sch, new, &cl->q);
sch              1412 net/sched/sch_cbq.c static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
sch              1419 net/sched/sch_cbq.c static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
sch              1426 net/sched/sch_cbq.c static unsigned long cbq_find(struct Qdisc *sch, u32 classid)
sch              1428 net/sched/sch_cbq.c 	struct cbq_sched_data *q = qdisc_priv(sch);
sch              1433 net/sched/sch_cbq.c static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
sch              1435 net/sched/sch_cbq.c 	struct cbq_sched_data *q = qdisc_priv(sch);
sch              1447 net/sched/sch_cbq.c static void cbq_destroy(struct Qdisc *sch)
sch              1449 net/sched/sch_cbq.c 	struct cbq_sched_data *q = qdisc_priv(sch);
sch              1471 net/sched/sch_cbq.c 			cbq_destroy_class(sch, cl);
sch              1477 net/sched/sch_cbq.c cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
sch              1481 net/sched/sch_cbq.c 	struct cbq_sched_data *q = qdisc_priv(sch);
sch              1522 net/sched/sch_cbq.c 						    qdisc_root_sleeping_running(sch),
sch              1532 net/sched/sch_cbq.c 		sch_tree_lock(sch);
sch              1556 net/sched/sch_cbq.c 		sch_tree_unlock(sch);
sch              1576 net/sched/sch_cbq.c 		if (TC_H_MAJ(classid ^ sch->handle) ||
sch              1583 net/sched/sch_cbq.c 		classid = TC_H_MAKE(sch->handle, 0x8000);
sch              1614 net/sched/sch_cbq.c 	err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
sch              1623 net/sched/sch_cbq.c 					qdisc_root_sleeping_running(sch),
sch              1635 net/sched/sch_cbq.c 	cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
sch              1644 net/sched/sch_cbq.c 	cl->qdisc = sch;
sch              1649 net/sched/sch_cbq.c 	sch_tree_lock(sch);
sch              1666 net/sched/sch_cbq.c 	sch_tree_unlock(sch);
sch              1668 net/sched/sch_cbq.c 	qdisc_class_hash_grow(sch, &q->clhash);
sch              1678 net/sched/sch_cbq.c static int cbq_delete(struct Qdisc *sch, unsigned long arg)
sch              1680 net/sched/sch_cbq.c 	struct cbq_sched_data *q = qdisc_priv(sch);
sch              1686 net/sched/sch_cbq.c 	sch_tree_lock(sch);
sch              1710 net/sched/sch_cbq.c 	sch_tree_unlock(sch);
sch              1712 net/sched/sch_cbq.c 	cbq_destroy_class(sch, cl);
sch              1716 net/sched/sch_cbq.c static struct tcf_block *cbq_tcf_block(struct Qdisc *sch, unsigned long arg,
sch              1719 net/sched/sch_cbq.c 	struct cbq_sched_data *q = qdisc_priv(sch);
sch              1728 net/sched/sch_cbq.c static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
sch              1731 net/sched/sch_cbq.c 	struct cbq_sched_data *q = qdisc_priv(sch);
sch              1744 net/sched/sch_cbq.c static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
sch              1751 net/sched/sch_cbq.c static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
sch              1753 net/sched/sch_cbq.c 	struct cbq_sched_data *q = qdisc_priv(sch);
sch              1766 net/sched/sch_cbq.c 			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
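
cbq_classify() above, like the atm, drr and cake enqueue paths in this listing, starts from the same tc-handle arithmetic: a handle is a 32-bit value whose upper 16 bits name the qdisc (major) and lower 16 bits a class (minor), so TC_H_MAJ(prio ^ sch->handle) == 0 asks "does skb->priority directly name a class of this very qdisc?". The macros below match the uapi pkt_sched.h definitions:

    #include <stdio.h>
    #include <stdint.h>

    /* Handle arithmetic as defined in include/uapi/linux/pkt_sched.h. */
    #define TC_H_MAJ_MASK 0xFFFF0000U
    #define TC_H_MIN_MASK 0x0000FFFFU
    #define TC_H_MAJ(h)   ((h) & TC_H_MAJ_MASK)
    #define TC_H_MIN(h)   ((h) & TC_H_MIN_MASK)
    #define TC_H_MAKE(maj, min) (TC_H_MAJ(maj) | TC_H_MIN(min))

    int main(void)
    {
        uint32_t sch_handle = 0x80010000U;              /* qdisc "8001:"  */
        uint32_t prio       = TC_H_MAKE(sch_handle, 3); /* class "8001:3" */

        if (TC_H_MAJ(prio ^ sch_handle) == 0)
            printf("skb->priority names class minor %u of this qdisc\n",
                   (unsigned)TC_H_MIN(prio));
        return 0;
    }
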
sch                80 net/sched/sch_cbs.c 	int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch,
sch                82 net/sched/sch_cbs.c 	struct sk_buff *(*dequeue)(struct Qdisc *sch);
sch                87 net/sched/sch_cbs.c static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch                98 net/sched/sch_cbs.c 	sch->qstats.backlog += len;
sch                99 net/sched/sch_cbs.c 	sch->q.qlen++;
sch               104 net/sched/sch_cbs.c static int cbs_enqueue_offload(struct sk_buff *skb, struct Qdisc *sch,
sch               107 net/sched/sch_cbs.c 	struct cbs_sched_data *q = qdisc_priv(sch);
sch               110 net/sched/sch_cbs.c 	return cbs_child_enqueue(skb, sch, qdisc, to_free);
sch               113 net/sched/sch_cbs.c static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch,
sch               116 net/sched/sch_cbs.c 	struct cbs_sched_data *q = qdisc_priv(sch);
sch               119 net/sched/sch_cbs.c 	if (sch->q.qlen == 0 && q->credits > 0) {
sch               127 net/sched/sch_cbs.c 	return cbs_child_enqueue(skb, sch, qdisc, to_free);
sch               130 net/sched/sch_cbs.c static int cbs_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch               133 net/sched/sch_cbs.c 	struct cbs_sched_data *q = qdisc_priv(sch);
sch               135 net/sched/sch_cbs.c 	return q->enqueue(skb, sch, to_free);
sch               160 net/sched/sch_cbs.c static struct sk_buff *cbs_child_dequeue(struct Qdisc *sch, struct Qdisc *child)
sch               168 net/sched/sch_cbs.c 	qdisc_qstats_backlog_dec(sch, skb);
sch               169 net/sched/sch_cbs.c 	qdisc_bstats_update(sch, skb);
sch               170 net/sched/sch_cbs.c 	sch->q.qlen--;
sch               175 net/sched/sch_cbs.c static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch)
sch               177 net/sched/sch_cbs.c 	struct cbs_sched_data *q = qdisc_priv(sch);
sch               206 net/sched/sch_cbs.c 	skb = cbs_child_dequeue(sch, qdisc);
sch               230 net/sched/sch_cbs.c static struct sk_buff *cbs_dequeue_offload(struct Qdisc *sch)
sch               232 net/sched/sch_cbs.c 	struct cbs_sched_data *q = qdisc_priv(sch);
sch               235 net/sched/sch_cbs.c 	return cbs_child_dequeue(sch, qdisc);
sch               238 net/sched/sch_cbs.c static struct sk_buff *cbs_dequeue(struct Qdisc *sch)
sch               240 net/sched/sch_cbs.c 	struct cbs_sched_data *q = qdisc_priv(sch);
sch               242 net/sched/sch_cbs.c 	return q->dequeue(sch);
sch               360 net/sched/sch_cbs.c static int cbs_change(struct Qdisc *sch, struct nlattr *opt,
sch               363 net/sched/sch_cbs.c 	struct cbs_sched_data *q = qdisc_priv(sch);
sch               364 net/sched/sch_cbs.c 	struct net_device *dev = qdisc_dev(sch);
sch               400 net/sched/sch_cbs.c static int cbs_init(struct Qdisc *sch, struct nlattr *opt,
sch               403 net/sched/sch_cbs.c 	struct cbs_sched_data *q = qdisc_priv(sch);
sch               404 net/sched/sch_cbs.c 	struct net_device *dev = qdisc_dev(sch);
sch               411 net/sched/sch_cbs.c 	q->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
sch               412 net/sched/sch_cbs.c 				     sch->handle, extack);
sch               422 net/sched/sch_cbs.c 	q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0);
sch               427 net/sched/sch_cbs.c 	qdisc_watchdog_init(&q->watchdog, sch);
sch               429 net/sched/sch_cbs.c 	return cbs_change(sch, opt, extack);
sch               432 net/sched/sch_cbs.c static void cbs_destroy(struct Qdisc *sch)
sch               434 net/sched/sch_cbs.c 	struct cbs_sched_data *q = qdisc_priv(sch);
sch               435 net/sched/sch_cbs.c 	struct net_device *dev = qdisc_dev(sch);
sch               451 net/sched/sch_cbs.c static int cbs_dump(struct Qdisc *sch, struct sk_buff *skb)
sch               453 net/sched/sch_cbs.c 	struct cbs_sched_data *q = qdisc_priv(sch);
sch               477 net/sched/sch_cbs.c static int cbs_dump_class(struct Qdisc *sch, unsigned long cl,
sch               480 net/sched/sch_cbs.c 	struct cbs_sched_data *q = qdisc_priv(sch);
sch               491 net/sched/sch_cbs.c static int cbs_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
sch               494 net/sched/sch_cbs.c 	struct cbs_sched_data *q = qdisc_priv(sch);
sch               497 net/sched/sch_cbs.c 		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
sch               498 net/sched/sch_cbs.c 					sch->handle, NULL);
sch               503 net/sched/sch_cbs.c 	*old = qdisc_replace(sch, new, &q->qdisc);
sch               507 net/sched/sch_cbs.c static struct Qdisc *cbs_leaf(struct Qdisc *sch, unsigned long arg)
sch               509 net/sched/sch_cbs.c 	struct cbs_sched_data *q = qdisc_priv(sch);
sch               514 net/sched/sch_cbs.c static unsigned long cbs_find(struct Qdisc *sch, u32 classid)
sch               519 net/sched/sch_cbs.c static void cbs_walk(struct Qdisc *sch, struct qdisc_walker *walker)
sch               523 net/sched/sch_cbs.c 			if (walker->fn(sch, 1, walker) < 0) {
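
The sch_cbs.c soft path implements the 802.1Qav credit-based shaper: credits accrue at idle_slope while traffic waits (capped at hicredit), must be non-negative to transmit, and sending consumes credit at send_slope (which is negative), clamped at locredit. A toy version of that decision in the shape of cbs_dequeue_soft(); the units here are arbitrary, where the kernel works in bytes and nanoseconds:

    #include <stdio.h>

    struct cbs {
        long credits, hicredit, locredit;
        long idle_slope, send_slope;   /* send_slope < 0 */
    };

    /* Accrue credit for the waited time, then transmit if allowed. */
    static int cbs_try_send(struct cbs *q, long waited, long tx_time)
    {
        q->credits += q->idle_slope * waited;
        if (q->credits > q->hicredit)
            q->credits = q->hicredit;
        if (q->credits < 0)
            return 0;                          /* keep waiting     */
        q->credits += q->send_slope * tx_time; /* pay for the send */
        if (q->credits < q->locredit)
            q->credits = q->locredit;
        return 1;
    }

    int main(void)
    {
        struct cbs q = { .credits = -20, .hicredit = 100, .locredit = -100,
                         .idle_slope = 10, .send_slope = -40 };

        for (int t = 0; t < 5; t++)
            printf("t=%d sent=%d credits=%ld\n",
                   t, cbs_try_send(&q, 1, 1), q.credits);
        return 0;
    }
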
sch               114 net/sched/sch_choke.c static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx,
sch               117 net/sched/sch_choke.c 	struct choke_sched_data *q = qdisc_priv(sch);
sch               127 net/sched/sch_choke.c 	qdisc_qstats_backlog_dec(sch, skb);
sch               128 net/sched/sch_choke.c 	qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
sch               129 net/sched/sch_choke.c 	qdisc_drop(skb, sch, to_free);
sch               130 net/sched/sch_choke.c 	--sch->q.qlen;
sch               219 net/sched/sch_choke.c static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch               222 net/sched/sch_choke.c 	struct choke_sched_data *q = qdisc_priv(sch);
sch               227 net/sched/sch_choke.c 	q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
sch               240 net/sched/sch_choke.c 			choke_drop_by_idx(sch, idx, to_free);
sch               248 net/sched/sch_choke.c 			qdisc_qstats_overlimit(sch);
sch               261 net/sched/sch_choke.c 				qdisc_qstats_overlimit(sch);
sch               274 net/sched/sch_choke.c 	if (sch->q.qlen < q->limit) {
sch               277 net/sched/sch_choke.c 		++sch->q.qlen;
sch               278 net/sched/sch_choke.c 		qdisc_qstats_backlog_inc(sch, skb);
sch               283 net/sched/sch_choke.c 	return qdisc_drop(skb, sch, to_free);
sch               286 net/sched/sch_choke.c 	qdisc_drop(skb, sch, to_free);
sch               290 net/sched/sch_choke.c static struct sk_buff *choke_dequeue(struct Qdisc *sch)
sch               292 net/sched/sch_choke.c 	struct choke_sched_data *q = qdisc_priv(sch);
sch               304 net/sched/sch_choke.c 	--sch->q.qlen;
sch               305 net/sched/sch_choke.c 	qdisc_qstats_backlog_dec(sch, skb);
sch               306 net/sched/sch_choke.c 	qdisc_bstats_update(sch, skb);
sch               311 net/sched/sch_choke.c static void choke_reset(struct Qdisc *sch)
sch               313 net/sched/sch_choke.c 	struct choke_sched_data *q = qdisc_priv(sch);
sch               321 net/sched/sch_choke.c 		rtnl_qdisc_drop(skb, sch);
sch               324 net/sched/sch_choke.c 	sch->q.qlen = 0;
sch               325 net/sched/sch_choke.c 	sch->qstats.backlog = 0;
sch               344 net/sched/sch_choke.c static int choke_change(struct Qdisc *sch, struct nlattr *opt,
sch               347 net/sched/sch_choke.c 	struct choke_sched_data *q = qdisc_priv(sch);
sch               385 net/sched/sch_choke.c 		sch_tree_lock(sch);
sch               388 net/sched/sch_choke.c 			unsigned int oqlen = sch->q.qlen, tail = 0;
sch               402 net/sched/sch_choke.c 				qdisc_qstats_backlog_dec(sch, skb);
sch               403 net/sched/sch_choke.c 				--sch->q.qlen;
sch               404 net/sched/sch_choke.c 				rtnl_qdisc_drop(skb, sch);
sch               406 net/sched/sch_choke.c 			qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
sch               414 net/sched/sch_choke.c 		sch_tree_lock(sch);
sch               428 net/sched/sch_choke.c 	sch_tree_unlock(sch);
sch               433 net/sched/sch_choke.c static int choke_init(struct Qdisc *sch, struct nlattr *opt,
sch               436 net/sched/sch_choke.c 	return choke_change(sch, opt, extack);
sch               439 net/sched/sch_choke.c static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
sch               441 net/sched/sch_choke.c 	struct choke_sched_data *q = qdisc_priv(sch);
sch               467 net/sched/sch_choke.c static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
sch               469 net/sched/sch_choke.c 	struct choke_sched_data *q = qdisc_priv(sch);
sch               481 net/sched/sch_choke.c static void choke_destroy(struct Qdisc *sch)
sch               483 net/sched/sch_choke.c 	struct choke_sched_data *q = qdisc_priv(sch);
sch               488 net/sched/sch_choke.c static struct sk_buff *choke_peek_head(struct Qdisc *sch)
sch               490 net/sched/sch_choke.c 	struct choke_sched_data *q = qdisc_priv(sch);
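
choke_enqueue() above implements CHOKe: alongside the RED average-queue computation, each arriving packet is compared with one drawn at random from the queue, and if both belong to the same flow both are dropped (choke_drop_by_idx), which preferentially penalizes unresponsive flows. The core comparison step, modeled:

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy CHOKe step: compare the arriving packet with one random
     * packet already queued; same flow => drop both.  (The kernel then
     * still applies RED to what remains.) */
    struct pkt { int flow; };

    static int choke_victim(const struct pkt *q, int qlen,
                            const struct pkt *arriving)
    {
        if (qlen == 0)
            return -1;
        int idx = rand() % qlen;           /* pick a random victim */
        return q[idx].flow == arriving->flow ? idx : -1;
    }

    int main(void)
    {
        struct pkt q[4] = { {1}, {2}, {1}, {3} };
        struct pkt in   = { 1 };
        int idx = choke_victim(q, 4, &in);

        if (idx >= 0)
            printf("same flow as slot %d: drop both\n", idx);
        else
            printf("no match: fall through to RED\n");
        return 0;
    }
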
sch                71 net/sched/sch_codel.c 	struct Qdisc *sch = ctx;
sch                72 net/sched/sch_codel.c 	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
sch                75 net/sched/sch_codel.c 		sch->qstats.backlog -= qdisc_pkt_len(skb);
sch                83 net/sched/sch_codel.c 	struct Qdisc *sch = ctx;
sch                86 net/sched/sch_codel.c 	qdisc_qstats_drop(sch);
sch                89 net/sched/sch_codel.c static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
sch                91 net/sched/sch_codel.c 	struct codel_sched_data *q = qdisc_priv(sch);
sch                94 net/sched/sch_codel.c 	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,
sch               101 net/sched/sch_codel.c 	if (q->stats.drop_count && sch->q.qlen) {
sch               102 net/sched/sch_codel.c 		qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
sch               107 net/sched/sch_codel.c 		qdisc_bstats_update(sch, skb);
sch               111 net/sched/sch_codel.c static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch               116 net/sched/sch_codel.c 	if (likely(qdisc_qlen(sch) < sch->limit)) {
sch               118 net/sched/sch_codel.c 		return qdisc_enqueue_tail(skb, sch);
sch               120 net/sched/sch_codel.c 	q = qdisc_priv(sch);
sch               122 net/sched/sch_codel.c 	return qdisc_drop(skb, sch, to_free);
sch               133 net/sched/sch_codel.c static int codel_change(struct Qdisc *sch, struct nlattr *opt,
sch               136 net/sched/sch_codel.c 	struct codel_sched_data *q = qdisc_priv(sch);
sch               149 net/sched/sch_codel.c 	sch_tree_lock(sch);
sch               170 net/sched/sch_codel.c 		sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]);
sch               175 net/sched/sch_codel.c 	qlen = sch->q.qlen;
sch               176 net/sched/sch_codel.c 	while (sch->q.qlen > sch->limit) {
sch               177 net/sched/sch_codel.c 		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
sch               180 net/sched/sch_codel.c 		qdisc_qstats_backlog_dec(sch, skb);
sch               181 net/sched/sch_codel.c 		rtnl_qdisc_drop(skb, sch);
sch               183 net/sched/sch_codel.c 	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
sch               185 net/sched/sch_codel.c 	sch_tree_unlock(sch);
sch               189 net/sched/sch_codel.c static int codel_init(struct Qdisc *sch, struct nlattr *opt,
sch               192 net/sched/sch_codel.c 	struct codel_sched_data *q = qdisc_priv(sch);
sch               194 net/sched/sch_codel.c 	sch->limit = DEFAULT_CODEL_LIMIT;
sch               199 net/sched/sch_codel.c 	q->params.mtu = psched_mtu(qdisc_dev(sch));
sch               202 net/sched/sch_codel.c 		int err = codel_change(sch, opt, extack);
sch               208 net/sched/sch_codel.c 	if (sch->limit >= 1)
sch               209 net/sched/sch_codel.c 		sch->flags |= TCQ_F_CAN_BYPASS;
sch               211 net/sched/sch_codel.c 		sch->flags &= ~TCQ_F_CAN_BYPASS;
sch               216 net/sched/sch_codel.c static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
sch               218 net/sched/sch_codel.c 	struct codel_sched_data *q = qdisc_priv(sch);
sch               228 net/sched/sch_codel.c 			sch->limit) ||
sch               245 net/sched/sch_codel.c static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
sch               247 net/sched/sch_codel.c 	const struct codel_sched_data *q = qdisc_priv(sch);
sch               271 net/sched/sch_codel.c static void codel_reset(struct Qdisc *sch)
sch               273 net/sched/sch_codel.c 	struct codel_sched_data *q = qdisc_priv(sch);
sch               275 net/sched/sch_codel.c 	qdisc_reset_queue(sch);
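
The sch_codel.c glue is thin because the algorithm lives in the generic codel_dequeue(): the qdisc supplies the dequeue/drop callbacks and afterwards reconciles statistics (the qdisc_tree_reduce_backlog() call for packets CoDel dropped internally). The distinctive part of CoDel is its drop-spacing control law, sketched here with floating point where the kernel uses fixed-point Newton steps (build with -lm):

    #include <stdio.h>
    #include <math.h>

    /* CoDel's control law: once in drop state, the k-th drop is
     * scheduled interval/sqrt(k) after the previous one, so drops
     * accelerate while the sojourn time stays above target. */
    int main(void)
    {
        double interval_ms = 100.0;    /* CoDel's default interval */
        double t = 0.0;

        for (int count = 1; count <= 5; count++) {
            t += interval_ms / sqrt((double)count);
            printf("drop %d at t=%.1f ms\n", count, t);
        }
        return 0;
    }
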
sch                39 net/sched/sch_drr.c static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
sch                41 net/sched/sch_drr.c 	struct drr_sched *q = qdisc_priv(sch);
sch                54 net/sched/sch_drr.c static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
sch                58 net/sched/sch_drr.c 	struct drr_sched *q = qdisc_priv(sch);
sch                82 net/sched/sch_drr.c 		quantum = psched_mtu(qdisc_dev(sch));
sch                89 net/sched/sch_drr.c 						    qdisc_root_sleeping_running(sch),
sch                97 net/sched/sch_drr.c 		sch_tree_lock(sch);
sch               100 net/sched/sch_drr.c 		sch_tree_unlock(sch);
sch               111 net/sched/sch_drr.c 	cl->qdisc	   = qdisc_create_dflt(sch->dev_queue,
sch               122 net/sched/sch_drr.c 					    qdisc_root_sleeping_running(sch),
sch               132 net/sched/sch_drr.c 	sch_tree_lock(sch);
sch               134 net/sched/sch_drr.c 	sch_tree_unlock(sch);
sch               136 net/sched/sch_drr.c 	qdisc_class_hash_grow(sch, &q->clhash);
sch               142 net/sched/sch_drr.c static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
sch               149 net/sched/sch_drr.c static int drr_delete_class(struct Qdisc *sch, unsigned long arg)
sch               151 net/sched/sch_drr.c 	struct drr_sched *q = qdisc_priv(sch);
sch               157 net/sched/sch_drr.c 	sch_tree_lock(sch);
sch               162 net/sched/sch_drr.c 	sch_tree_unlock(sch);
sch               164 net/sched/sch_drr.c 	drr_destroy_class(sch, cl);
sch               168 net/sched/sch_drr.c static unsigned long drr_search_class(struct Qdisc *sch, u32 classid)
sch               170 net/sched/sch_drr.c 	return (unsigned long)drr_find_class(sch, classid);
sch               173 net/sched/sch_drr.c static struct tcf_block *drr_tcf_block(struct Qdisc *sch, unsigned long cl,
sch               176 net/sched/sch_drr.c 	struct drr_sched *q = qdisc_priv(sch);
sch               186 net/sched/sch_drr.c static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent,
sch               189 net/sched/sch_drr.c 	struct drr_class *cl = drr_find_class(sch, classid);
sch               197 net/sched/sch_drr.c static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
sch               204 net/sched/sch_drr.c static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
sch               211 net/sched/sch_drr.c 		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
sch               217 net/sched/sch_drr.c 	*old = qdisc_replace(sch, new, &cl->qdisc);
sch               221 net/sched/sch_drr.c static struct Qdisc *drr_class_leaf(struct Qdisc *sch, unsigned long arg)
sch               235 net/sched/sch_drr.c static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
sch               257 net/sched/sch_drr.c static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
sch               269 net/sched/sch_drr.c 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
sch               278 net/sched/sch_drr.c static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
sch               280 net/sched/sch_drr.c 	struct drr_sched *q = qdisc_priv(sch);
sch               293 net/sched/sch_drr.c 			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
sch               302 net/sched/sch_drr.c static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
sch               305 net/sched/sch_drr.c 	struct drr_sched *q = qdisc_priv(sch);
sch               311 net/sched/sch_drr.c 	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
sch               312 net/sched/sch_drr.c 		cl = drr_find_class(sch, skb->priority);
sch               334 net/sched/sch_drr.c 			cl = drr_find_class(sch, res.classid);
sch               340 net/sched/sch_drr.c static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch               344 net/sched/sch_drr.c 	struct drr_sched *q = qdisc_priv(sch);
sch               349 net/sched/sch_drr.c 	cl = drr_classify(skb, sch, &err);
sch               352 net/sched/sch_drr.c 			qdisc_qstats_drop(sch);
sch               362 net/sched/sch_drr.c 			qdisc_qstats_drop(sch);
sch               372 net/sched/sch_drr.c 	sch->qstats.backlog += len;
sch               373 net/sched/sch_drr.c 	sch->q.qlen++;
sch               377 net/sched/sch_drr.c static struct sk_buff *drr_dequeue(struct Qdisc *sch)
sch               379 net/sched/sch_drr.c 	struct drr_sched *q = qdisc_priv(sch);
sch               404 net/sched/sch_drr.c 			qdisc_bstats_update(sch, skb);
sch               405 net/sched/sch_drr.c 			qdisc_qstats_backlog_dec(sch, skb);
sch               406 net/sched/sch_drr.c 			sch->q.qlen--;
sch               417 net/sched/sch_drr.c static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
sch               420 net/sched/sch_drr.c 	struct drr_sched *q = qdisc_priv(sch);
sch               423 net/sched/sch_drr.c 	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
sch               433 net/sched/sch_drr.c static void drr_reset_qdisc(struct Qdisc *sch)
sch               435 net/sched/sch_drr.c 	struct drr_sched *q = qdisc_priv(sch);
sch               446 net/sched/sch_drr.c 	sch->qstats.backlog = 0;
sch               447 net/sched/sch_drr.c 	sch->q.qlen = 0;
sch               450 net/sched/sch_drr.c static void drr_destroy_qdisc(struct Qdisc *sch)
sch               452 net/sched/sch_drr.c 	struct drr_sched *q = qdisc_priv(sch);
sch               462 net/sched/sch_drr.c 			drr_destroy_class(sch, cl);
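
The sch_drr.c hits are textbook deficit round robin: each class gets a quantum (defaulting to the device MTU in drr_change_class()), and drr_dequeue() serves the head of the active list while its deficit covers the head packet, otherwise refills the deficit by one quantum and rotates the class to the tail. A compact model of that serve-or-rotate loop:

    #include <stdio.h>

    /* Deficit round robin over two classes of fixed-size packets --
     * the same serve-or-rotate shape as drr_dequeue(). */
    struct cls { const char *name; int quantum, deficit, pkts, pkt_len; };

    int main(void)
    {
        struct cls c[2] = { { "A", 1500, 0, 3, 1000 },
                            { "B", 1500, 0, 3, 1400 } };
        int active = 2, i = 0;

        while (active) {
            struct cls *cl = &c[i];

            if (cl->pkts == 0) {               /* empty: skip        */
                i = (i + 1) % 2;
                continue;
            }
            if (cl->deficit >= cl->pkt_len) {  /* head fits: send it */
                cl->deficit -= cl->pkt_len;
                if (--cl->pkts == 0)
                    active--;
                printf("send %s (deficit now %d)\n", cl->name, cl->deficit);
            } else {                           /* refill and rotate  */
                cl->deficit += cl->quantum;
                i = (i + 1) % 2;
            }
        }
        return 0;
    }
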
sch                64 net/sched/sch_dsmark.c static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
sch                68 net/sched/sch_dsmark.c 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
sch                71 net/sched/sch_dsmark.c 		 __func__, sch, p, new, old);
sch                74 net/sched/sch_dsmark.c 		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
sch                75 net/sched/sch_dsmark.c 					sch->handle, NULL);
sch                80 net/sched/sch_dsmark.c 	*old = qdisc_replace(sch, new, &p->q);
sch                84 net/sched/sch_dsmark.c static struct Qdisc *dsmark_leaf(struct Qdisc *sch, unsigned long arg)
sch                86 net/sched/sch_dsmark.c 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
sch                90 net/sched/sch_dsmark.c static unsigned long dsmark_find(struct Qdisc *sch, u32 classid)
sch                95 net/sched/sch_dsmark.c static unsigned long dsmark_bind_filter(struct Qdisc *sch,
sch                99 net/sched/sch_dsmark.c 		 __func__, sch, qdisc_priv(sch), classid);
sch               101 net/sched/sch_dsmark.c 	return dsmark_find(sch, classid);
sch               104 net/sched/sch_dsmark.c static void dsmark_unbind_filter(struct Qdisc *sch, unsigned long cl)
sch               116 net/sched/sch_dsmark.c static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
sch               120 net/sched/sch_dsmark.c 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
sch               126 net/sched/sch_dsmark.c 		 __func__, sch, p, classid, parent, *arg);
sch               153 net/sched/sch_dsmark.c static int dsmark_delete(struct Qdisc *sch, unsigned long arg)
sch               155 net/sched/sch_dsmark.c 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
sch               166 net/sched/sch_dsmark.c static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker)
sch               168 net/sched/sch_dsmark.c 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
sch               172 net/sched/sch_dsmark.c 		 __func__, sch, p, walker);
sch               181 net/sched/sch_dsmark.c 			if (walker->fn(sch, i + 1, walker) < 0) {
sch               191 net/sched/sch_dsmark.c static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl,
sch               194 net/sched/sch_dsmark.c 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
sch               201 net/sched/sch_dsmark.c static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch               205 net/sched/sch_dsmark.c 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
sch               208 net/sched/sch_dsmark.c 	pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);
sch               239 net/sched/sch_dsmark.c 	if (TC_H_MAJ(skb->priority) == sch->handle)
sch               273 net/sched/sch_dsmark.c 			qdisc_qstats_drop(sch);
sch               277 net/sched/sch_dsmark.c 	sch->qstats.backlog += len;
sch               278 net/sched/sch_dsmark.c 	sch->q.qlen++;
sch               283 net/sched/sch_dsmark.c 	qdisc_drop(skb, sch, to_free);
sch               287 net/sched/sch_dsmark.c static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
sch               289 net/sched/sch_dsmark.c 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
sch               293 net/sched/sch_dsmark.c 	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
sch               299 net/sched/sch_dsmark.c 	qdisc_bstats_update(sch, skb);
sch               300 net/sched/sch_dsmark.c 	qdisc_qstats_backlog_dec(sch, skb);
sch               301 net/sched/sch_dsmark.c 	sch->q.qlen--;
sch               330 net/sched/sch_dsmark.c static struct sk_buff *dsmark_peek(struct Qdisc *sch)
sch               332 net/sched/sch_dsmark.c 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
sch               334 net/sched/sch_dsmark.c 	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
sch               339 net/sched/sch_dsmark.c static int dsmark_init(struct Qdisc *sch, struct nlattr *opt,
sch               342 net/sched/sch_dsmark.c 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
sch               349 net/sched/sch_dsmark.c 	pr_debug("%s(sch %p,[qdisc %p],opt %p)\n", __func__, sch, p, opt);
sch               354 net/sched/sch_dsmark.c 	err = tcf_block_get(&p->block, &p->filter_list, sch, extack);
sch               390 net/sched/sch_dsmark.c 	p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle,
sch               404 net/sched/sch_dsmark.c static void dsmark_reset(struct Qdisc *sch)
sch               406 net/sched/sch_dsmark.c 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
sch               408 net/sched/sch_dsmark.c 	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
sch               410 net/sched/sch_dsmark.c 	sch->qstats.backlog = 0;
sch               411 net/sched/sch_dsmark.c 	sch->q.qlen = 0;
sch               414 net/sched/sch_dsmark.c static void dsmark_destroy(struct Qdisc *sch)
sch               416 net/sched/sch_dsmark.c 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
sch               418 net/sched/sch_dsmark.c 	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
sch               426 net/sched/sch_dsmark.c static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
sch               429 net/sched/sch_dsmark.c 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
sch               432 net/sched/sch_dsmark.c 	pr_debug("%s(sch %p,[qdisc %p],class %ld\n", __func__, sch, p, cl);
sch               437 net/sched/sch_dsmark.c 	tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1);
sch               454 net/sched/sch_dsmark.c static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
sch               456 net/sched/sch_dsmark.c 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
sch                75 net/sched/sch_etf.c static bool is_packet_valid(struct Qdisc *sch, struct sk_buff *nskb)
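
dsmark's split is visible above: dsmark_enqueue() classifies and stores a class index, and dsmark_dequeue() rewrites the DS byte with that class's (mask, value) pair before the packet leaves. The rewrite itself is a single masked OR; the mask/value pairs below are illustrative settings, not kernel defaults:

    #include <stdio.h>
    #include <stdint.h>

    /* dsmark's dequeue-time rewrite of the IPv4 TOS / IPv6 traffic
     * class byte: newtos = (tos & mask[idx]) | value[idx]. */
    int main(void)
    {
        uint8_t mask[4]  = { 0xff, 0x03, 0x03, 0x03 };  /* keep ECN bits */
        uint8_t value[4] = { 0x00, 0xb8, 0x28, 0x48 };  /* DSCP << 2     */

        uint8_t tos = (0x2e << 2) | 0x01;  /* EF with ECT(1)             */
        int idx = 2;                       /* class chosen at enqueue    */
        uint8_t newtos = (tos & mask[idx]) | value[idx];

        printf("tos %#x -> %#x\n", tos, newtos);  /* 0xb9 -> 0x29 (AF11) */
        return 0;
    }
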
sch                77 net/sched/sch_etf.c 	struct etf_sched_data *q = qdisc_priv(sch);
sch               108 net/sched/sch_etf.c static struct sk_buff *etf_peek_timesortedlist(struct Qdisc *sch)
sch               110 net/sched/sch_etf.c 	struct etf_sched_data *q = qdisc_priv(sch);
sch               120 net/sched/sch_etf.c static void reset_watchdog(struct Qdisc *sch)
sch               122 net/sched/sch_etf.c 	struct etf_sched_data *q = qdisc_priv(sch);
sch               123 net/sched/sch_etf.c 	struct sk_buff *skb = etf_peek_timesortedlist(sch);
sch               162 net/sched/sch_etf.c static int etf_enqueue_timesortedlist(struct sk_buff *nskb, struct Qdisc *sch,
sch               165 net/sched/sch_etf.c 	struct etf_sched_data *q = qdisc_priv(sch);
sch               170 net/sched/sch_etf.c 	if (!is_packet_valid(sch, nskb)) {
sch               173 net/sched/sch_etf.c 		return qdisc_drop(nskb, sch, to_free);
sch               191 net/sched/sch_etf.c 	qdisc_qstats_backlog_inc(sch, nskb);
sch               192 net/sched/sch_etf.c 	sch->q.qlen++;
sch               195 net/sched/sch_etf.c 	reset_watchdog(sch);
sch               200 net/sched/sch_etf.c static void timesortedlist_drop(struct Qdisc *sch, struct sk_buff *skb,
sch               203 net/sched/sch_etf.c 	struct etf_sched_data *q = qdisc_priv(sch);
sch               218 net/sched/sch_etf.c 		skb->dev = qdisc_dev(sch);
sch               222 net/sched/sch_etf.c 		qdisc_qstats_backlog_dec(sch, skb);
sch               223 net/sched/sch_etf.c 		qdisc_drop(skb, sch, &to_free);
sch               224 net/sched/sch_etf.c 		qdisc_qstats_overlimit(sch);
sch               225 net/sched/sch_etf.c 		sch->q.qlen--;
sch               231 net/sched/sch_etf.c static void timesortedlist_remove(struct Qdisc *sch, struct sk_buff *skb)
sch               233 net/sched/sch_etf.c 	struct etf_sched_data *q = qdisc_priv(sch);
sch               242 net/sched/sch_etf.c 	skb->dev = qdisc_dev(sch);
sch               244 net/sched/sch_etf.c 	qdisc_qstats_backlog_dec(sch, skb);
sch               246 net/sched/sch_etf.c 	qdisc_bstats_update(sch, skb);
sch               250 net/sched/sch_etf.c 	sch->q.qlen--;
sch               253 net/sched/sch_etf.c static struct sk_buff *etf_dequeue_timesortedlist(struct Qdisc *sch)
sch               255 net/sched/sch_etf.c 	struct etf_sched_data *q = qdisc_priv(sch);
sch               259 net/sched/sch_etf.c 	skb = etf_peek_timesortedlist(sch);
sch               267 net/sched/sch_etf.c 		timesortedlist_drop(sch, skb, now);
sch               276 net/sched/sch_etf.c 		timesortedlist_remove(sch, skb);
sch               285 net/sched/sch_etf.c 		timesortedlist_remove(sch, skb);
sch               291 net/sched/sch_etf.c 	reset_watchdog(sch);
sch               346 net/sched/sch_etf.c static int etf_init(struct Qdisc *sch, struct nlattr *opt,
sch               349 net/sched/sch_etf.c 	struct etf_sched_data *q = qdisc_priv(sch);
sch               350 net/sched/sch_etf.c 	struct net_device *dev = qdisc_dev(sch);
sch               382 net/sched/sch_etf.c 	q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0);
sch               415 net/sched/sch_etf.c 	qdisc_watchdog_init_clockid(&q->watchdog, sch, q->clockid);
sch               420 net/sched/sch_etf.c static void timesortedlist_clear(struct Qdisc *sch)
sch               422 net/sched/sch_etf.c 	struct etf_sched_data *q = qdisc_priv(sch);
sch               432 net/sched/sch_etf.c 		sch->q.qlen--;
sch               436 net/sched/sch_etf.c static void etf_reset(struct Qdisc *sch)
sch               438 net/sched/sch_etf.c 	struct etf_sched_data *q = qdisc_priv(sch);
sch               441 net/sched/sch_etf.c 	if (q->watchdog.qdisc == sch)
sch               445 net/sched/sch_etf.c 	timesortedlist_clear(sch);
sch               446 net/sched/sch_etf.c 	__qdisc_reset_queue(&sch->q);
sch               448 net/sched/sch_etf.c 	sch->qstats.backlog = 0;
sch               449 net/sched/sch_etf.c 	sch->q.qlen = 0;
sch               454 net/sched/sch_etf.c static void etf_destroy(struct Qdisc *sch)
sch               456 net/sched/sch_etf.c 	struct etf_sched_data *q = qdisc_priv(sch);
sch               457 net/sched/sch_etf.c 	struct net_device *dev = qdisc_dev(sch);
sch               460 net/sched/sch_etf.c 	if (q->watchdog.qdisc == sch)
sch               466 net/sched/sch_etf.c static int etf_dump(struct Qdisc *sch, struct sk_buff *skb)
sch               468 net/sched/sch_etf.c 	struct etf_sched_data *q = qdisc_priv(sch);
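
The sch_etf.c hits describe earliest-txtime-first scheduling: packets sit in a time-sorted rbtree, timesortedlist_drop() discards those whose transmit time already passed, and dequeue releases the head only once now is within the configured delta of its txtime, otherwise re-arming the watchdog. The head-of-queue decision, modeled:

    #include <stdio.h>
    #include <stdint.h>

    /* ETF head-of-queue decision in the shape of
     * etf_dequeue_timesortedlist(): drop stale packets, release the
     * head only inside its launch window [txtime - delta, txtime],
     * otherwise sleep until the window opens. */
    static const char *etf_decide(int64_t now, int64_t txtime, int64_t delta)
    {
        if (txtime < now)
            return "drop (deadline missed)";
        if (now >= txtime - delta)
            return "dequeue (inside launch window)";
        return "re-arm watchdog and wait";
    }

    int main(void)
    {
        const int64_t txtime = 1000, delta = 100;

        for (int64_t now = 850; now <= 1050; now += 100)
            printf("now=%4lld: %s\n", (long long)now,
                   etf_decide(now, txtime, delta));
        return 0;
    }
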
sch                18 net/sched/sch_fifo.c static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch                21 net/sched/sch_fifo.c 	if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
sch                22 net/sched/sch_fifo.c 		return qdisc_enqueue_tail(skb, sch);
sch                24 net/sched/sch_fifo.c 	return qdisc_drop(skb, sch, to_free);
sch                27 net/sched/sch_fifo.c static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch                30 net/sched/sch_fifo.c 	if (likely(sch->q.qlen < sch->limit))
sch                31 net/sched/sch_fifo.c 		return qdisc_enqueue_tail(skb, sch);
sch                33 net/sched/sch_fifo.c 	return qdisc_drop(skb, sch, to_free);
sch                36 net/sched/sch_fifo.c static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch                41 net/sched/sch_fifo.c 	if (likely(sch->q.qlen < sch->limit))
sch                42 net/sched/sch_fifo.c 		return qdisc_enqueue_tail(skb, sch);
sch                44 net/sched/sch_fifo.c 	prev_backlog = sch->qstats.backlog;
sch                46 net/sched/sch_fifo.c 	__qdisc_queue_drop_head(sch, &sch->q, to_free);
sch                47 net/sched/sch_fifo.c 	qdisc_qstats_drop(sch);
sch                48 net/sched/sch_fifo.c 	qdisc_enqueue_tail(skb, sch);
sch                50 net/sched/sch_fifo.c 	qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);
sch                54 net/sched/sch_fifo.c static int fifo_init(struct Qdisc *sch, struct nlattr *opt,
sch                58 net/sched/sch_fifo.c 	bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
sch                61 net/sched/sch_fifo.c 		u32 limit = qdisc_dev(sch)->tx_queue_len;
sch                64 net/sched/sch_fifo.c 			limit *= psched_mtu(qdisc_dev(sch));
sch                66 net/sched/sch_fifo.c 		sch->limit = limit;
sch                73 net/sched/sch_fifo.c 		sch->limit = ctl->limit;
sch                77 net/sched/sch_fifo.c 		bypass = sch->limit >= psched_mtu(qdisc_dev(sch));
sch                79 net/sched/sch_fifo.c 		bypass = sch->limit >= 1;
sch                82 net/sched/sch_fifo.c 		sch->flags |= TCQ_F_CAN_BYPASS;
sch                84 net/sched/sch_fifo.c 		sch->flags &= ~TCQ_F_CAN_BYPASS;
sch                88 net/sched/sch_fifo.c static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
sch                90 net/sched/sch_fifo.c 	struct tc_fifo_qopt opt = { .limit = sch->limit };
sch               164 net/sched/sch_fifo.c struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
sch               171 net/sched/sch_fifo.c 	q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1),
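
bfifo and pfifo differ only in what the limit counts: bfifo_enqueue() meters queued bytes (qstats.backlog) against sch->limit, pfifo_enqueue() meters packets (q.qlen), and pfifo_tail_enqueue() instead drops the head to make room for the new tail. A small sketch of the two admission checks, assuming a toy queue struct rather than the kernel's Qdisc:

#include <stdbool.h>
#include <stdio.h>

/* Toy queue state; limit is bytes for bfifo, packets for pfifo,
 * matching the checks in bfifo_enqueue()/pfifo_enqueue() above. */
struct toyq {
	unsigned int qlen;	/* packets queued */
	unsigned int backlog;	/* bytes queued */
	unsigned int limit;
};

/* bfifo: admit while queued bytes plus this packet fit the byte limit. */
static bool bfifo_admit(struct toyq *q, unsigned int pkt_len)
{
	if (q->backlog + pkt_len <= q->limit) {
		q->backlog += pkt_len;
		q->qlen++;
		return true;
	}
	return false;		/* tail drop */
}

/* pfifo: admit while the packet count is under the limit. */
static bool pfifo_admit(struct toyq *q, unsigned int pkt_len)
{
	if (q->qlen < q->limit) {
		q->backlog += pkt_len;
		q->qlen++;
		return true;
	}
	return false;
}

int main(void)
{
	struct toyq b = { .limit = 3000 }, p = { .limit = 2 };

	for (int i = 0; i < 4; i++)
		printf("bfifo %s, pfifo %s\n",
		       bfifo_admit(&b, 1000) ? "ok" : "drop",
		       pfifo_admit(&p, 1000) ? "ok" : "drop");
	return 0;
}
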
sch               360 net/sched/sch_fq.c static void fq_erase_head(struct Qdisc *sch, struct fq_flow *flow,
sch               367 net/sched/sch_fq.c 		skb->dev = qdisc_dev(sch);
sch               372 net/sched/sch_fq.c static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
sch               377 net/sched/sch_fq.c 		fq_erase_head(sch, flow, skb);
sch               380 net/sched/sch_fq.c 		qdisc_qstats_backlog_dec(sch, skb);
sch               381 net/sched/sch_fq.c 		sch->q.qlen--;
sch               420 net/sched/sch_fq.c static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch               423 net/sched/sch_fq.c 	struct fq_sched_data *q = qdisc_priv(sch);
sch               426 net/sched/sch_fq.c 	if (unlikely(sch->q.qlen >= sch->limit))
sch               427 net/sched/sch_fq.c 		return qdisc_drop(skb, sch, to_free);
sch               432 net/sched/sch_fq.c 		return qdisc_drop(skb, sch, to_free);
sch               436 net/sched/sch_fq.c 	qdisc_qstats_backlog_inc(sch, skb);
sch               450 net/sched/sch_fq.c 	sch->q.qlen++;
sch               482 net/sched/sch_fq.c static struct sk_buff *fq_dequeue(struct Qdisc *sch)
sch               484 net/sched/sch_fq.c 	struct fq_sched_data *q = qdisc_priv(sch);
sch               492 net/sched/sch_fq.c 	if (!sch->q.qlen)
sch               495 net/sched/sch_fq.c 	skb = fq_dequeue_head(sch, &q->internal);
sch               539 net/sched/sch_fq.c 	skb = fq_dequeue_head(sch, f);
sch               598 net/sched/sch_fq.c 	qdisc_bstats_update(sch, skb);
sch               618 net/sched/sch_fq.c static void fq_reset(struct Qdisc *sch)
sch               620 net/sched/sch_fq.c 	struct fq_sched_data *q = qdisc_priv(sch);
sch               626 net/sched/sch_fq.c 	sch->q.qlen = 0;
sch               627 net/sched/sch_fq.c 	sch->qstats.backlog = 0;
sch               703 net/sched/sch_fq.c static int fq_resize(struct Qdisc *sch, u32 log)
sch               705 net/sched/sch_fq.c 	struct fq_sched_data *q = qdisc_priv(sch);
sch               715 net/sched/sch_fq.c 			      netdev_queue_numa_node_read(sch->dev_queue));
sch               722 net/sched/sch_fq.c 	sch_tree_lock(sch);
sch               731 net/sched/sch_fq.c 	sch_tree_unlock(sch);
sch               753 net/sched/sch_fq.c static int fq_change(struct Qdisc *sch, struct nlattr *opt,
sch               756 net/sched/sch_fq.c 	struct fq_sched_data *q = qdisc_priv(sch);
sch               770 net/sched/sch_fq.c 	sch_tree_lock(sch);
sch               783 net/sched/sch_fq.c 		sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);
sch               838 net/sched/sch_fq.c 		sch_tree_unlock(sch);
sch               839 net/sched/sch_fq.c 		err = fq_resize(sch, fq_log);
sch               840 net/sched/sch_fq.c 		sch_tree_lock(sch);
sch               842 net/sched/sch_fq.c 	while (sch->q.qlen > sch->limit) {
sch               843 net/sched/sch_fq.c 		struct sk_buff *skb = fq_dequeue(sch);
sch               851 net/sched/sch_fq.c 	qdisc_tree_reduce_backlog(sch, drop_count, drop_len);
sch               853 net/sched/sch_fq.c 	sch_tree_unlock(sch);
sch               857 net/sched/sch_fq.c static void fq_destroy(struct Qdisc *sch)
sch               859 net/sched/sch_fq.c 	struct fq_sched_data *q = qdisc_priv(sch);
sch               861 net/sched/sch_fq.c 	fq_reset(sch);
sch               866 net/sched/sch_fq.c static int fq_init(struct Qdisc *sch, struct nlattr *opt,
sch               869 net/sched/sch_fq.c 	struct fq_sched_data *q = qdisc_priv(sch);
sch               872 net/sched/sch_fq.c 	sch->limit		= 10000;
sch               874 net/sched/sch_fq.c 	q->quantum		= 2 * psched_mtu(qdisc_dev(sch));
sch               875 net/sched/sch_fq.c 	q->initial_quantum	= 10 * psched_mtu(qdisc_dev(sch));
sch               891 net/sched/sch_fq.c 	qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);
sch               894 net/sched/sch_fq.c 		err = fq_change(sch, opt, extack);
sch               896 net/sched/sch_fq.c 		err = fq_resize(sch, q->fq_trees_log);
sch               901 net/sched/sch_fq.c static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
sch               903 net/sched/sch_fq.c 	struct fq_sched_data *q = qdisc_priv(sch);
sch               915 net/sched/sch_fq.c 	if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
sch               937 net/sched/sch_fq.c static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
sch               939 net/sched/sch_fq.c 	struct fq_sched_data *q = qdisc_priv(sch);
sch               942 net/sched/sch_fq.c 	sch_tree_lock(sch);
sch               958 net/sched/sch_fq.c 	sch_tree_unlock(sch);
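
fq paces each flow: after a packet leaves in fq_dequeue(), the flow's next transmit time is pushed out by len/rate, and the qdisc sleeps until the earliest such time. The arithmetic in isolation, sketched in user space with illustrative names (next_tx_ns is not a kernel helper):

#include <stdio.h>

/* Pacing arithmetic in the spirit of fq_dequeue(): after sending a
 * packet of len bytes at rate bytes/sec, the flow must wait len/rate
 * seconds (here in nanoseconds) before its next packet may leave. */
static long long next_tx_ns(long long now_ns, unsigned int len,
			    unsigned long long rate_Bps)
{
	long long gap = (long long)len * 1000000000LL / rate_Bps;

	return now_ns + gap;
}

int main(void)
{
	long long t = 0;

	/* three 1500-byte packets paced at 1 MB/s depart 1.5 ms apart */
	for (int i = 0; i < 3; i++) {
		printf("pkt %d departs at %lld ns\n", i, t);
		t = next_tx_ns(t, 1500, 1000000);
	}
	return 0;
}
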
sch                77 net/sched/sch_fq_codel.c static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
sch                80 net/sched/sch_fq_codel.c 	struct fq_codel_sched_data *q = qdisc_priv(sch);
sch                85 net/sched/sch_fq_codel.c 	if (TC_H_MAJ(skb->priority) == sch->handle &&
sch               138 net/sched/sch_fq_codel.c static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
sch               141 net/sched/sch_fq_codel.c 	struct fq_codel_sched_data *q = qdisc_priv(sch);
sch               179 net/sched/sch_fq_codel.c 	sch->qstats.drops += i;
sch               180 net/sched/sch_fq_codel.c 	sch->qstats.backlog -= len;
sch               181 net/sched/sch_fq_codel.c 	sch->q.qlen -= i;
sch               185 net/sched/sch_fq_codel.c static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch               188 net/sched/sch_fq_codel.c 	struct fq_codel_sched_data *q = qdisc_priv(sch);
sch               195 net/sched/sch_fq_codel.c 	idx = fq_codel_classify(skb, sch, &ret);
sch               198 net/sched/sch_fq_codel.c 			qdisc_qstats_drop(sch);
sch               208 net/sched/sch_fq_codel.c 	qdisc_qstats_backlog_inc(sch, skb);
sch               218 net/sched/sch_fq_codel.c 	if (++sch->q.qlen <= sch->limit && !memory_limited)
sch               221 net/sched/sch_fq_codel.c 	prev_backlog = sch->qstats.backlog;
sch               222 net/sched/sch_fq_codel.c 	prev_qlen = sch->q.qlen;
sch               231 net/sched/sch_fq_codel.c 	ret = fq_codel_drop(sch, q->drop_batch_size, to_free);
sch               233 net/sched/sch_fq_codel.c 	prev_qlen -= sch->q.qlen;
sch               234 net/sched/sch_fq_codel.c 	prev_backlog -= sch->qstats.backlog;
sch               244 net/sched/sch_fq_codel.c 		qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
sch               248 net/sched/sch_fq_codel.c 	qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
sch               258 net/sched/sch_fq_codel.c 	struct Qdisc *sch = ctx;
sch               259 net/sched/sch_fq_codel.c 	struct fq_codel_sched_data *q = qdisc_priv(sch);
sch               268 net/sched/sch_fq_codel.c 		sch->q.qlen--;
sch               269 net/sched/sch_fq_codel.c 		sch->qstats.backlog -= qdisc_pkt_len(skb);
sch               276 net/sched/sch_fq_codel.c 	struct Qdisc *sch = ctx;
sch               279 net/sched/sch_fq_codel.c 	qdisc_qstats_drop(sch);
sch               282 net/sched/sch_fq_codel.c static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
sch               284 net/sched/sch_fq_codel.c 	struct fq_codel_sched_data *q = qdisc_priv(sch);
sch               304 net/sched/sch_fq_codel.c 	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
sch               316 net/sched/sch_fq_codel.c 	qdisc_bstats_update(sch, skb);
sch               321 net/sched/sch_fq_codel.c 	if (q->cstats.drop_count && sch->q.qlen) {
sch               322 net/sched/sch_fq_codel.c 		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
sch               336 net/sched/sch_fq_codel.c static void fq_codel_reset(struct Qdisc *sch)
sch               338 net/sched/sch_fq_codel.c 	struct fq_codel_sched_data *q = qdisc_priv(sch);
sch               351 net/sched/sch_fq_codel.c 	sch->q.qlen = 0;
sch               352 net/sched/sch_fq_codel.c 	sch->qstats.backlog = 0;
sch               368 net/sched/sch_fq_codel.c static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
sch               371 net/sched/sch_fq_codel.c 	struct fq_codel_sched_data *q = qdisc_priv(sch);
sch               390 net/sched/sch_fq_codel.c 	sch_tree_lock(sch);
sch               411 net/sched/sch_fq_codel.c 		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);
sch               425 net/sched/sch_fq_codel.c 	while (sch->q.qlen > sch->limit ||
sch               427 net/sched/sch_fq_codel.c 		struct sk_buff *skb = fq_codel_dequeue(sch);
sch               433 net/sched/sch_fq_codel.c 	qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
sch               437 net/sched/sch_fq_codel.c 	sch_tree_unlock(sch);
sch               441 net/sched/sch_fq_codel.c static void fq_codel_destroy(struct Qdisc *sch)
sch               443 net/sched/sch_fq_codel.c 	struct fq_codel_sched_data *q = qdisc_priv(sch);
sch               450 net/sched/sch_fq_codel.c static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
sch               453 net/sched/sch_fq_codel.c 	struct fq_codel_sched_data *q = qdisc_priv(sch);
sch               457 net/sched/sch_fq_codel.c 	sch->limit = 10*1024;
sch               461 net/sched/sch_fq_codel.c 	q->quantum = psched_mtu(qdisc_dev(sch));
sch               467 net/sched/sch_fq_codel.c 	q->cparams.mtu = psched_mtu(qdisc_dev(sch));
sch               470 net/sched/sch_fq_codel.c 		err = fq_codel_change(sch, opt, extack);
sch               475 net/sched/sch_fq_codel.c 	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
sch               499 net/sched/sch_fq_codel.c 	if (sch->limit >= 1)
sch               500 net/sched/sch_fq_codel.c 		sch->flags |= TCQ_F_CAN_BYPASS;
sch               502 net/sched/sch_fq_codel.c 		sch->flags &= ~TCQ_F_CAN_BYPASS;
sch               513 net/sched/sch_fq_codel.c static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
sch               515 net/sched/sch_fq_codel.c 	struct fq_codel_sched_data *q = qdisc_priv(sch);
sch               525 net/sched/sch_fq_codel.c 			sch->limit) ||
sch               551 net/sched/sch_fq_codel.c static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
sch               553 net/sched/sch_fq_codel.c 	struct fq_codel_sched_data *q = qdisc_priv(sch);
sch               567 net/sched/sch_fq_codel.c 	sch_tree_lock(sch);
sch               573 net/sched/sch_fq_codel.c 	sch_tree_unlock(sch);
sch               578 net/sched/sch_fq_codel.c static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
sch               583 net/sched/sch_fq_codel.c static unsigned long fq_codel_find(struct Qdisc *sch, u32 classid)
sch               588 net/sched/sch_fq_codel.c static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
sch               598 net/sched/sch_fq_codel.c static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl,
sch               601 net/sched/sch_fq_codel.c 	struct fq_codel_sched_data *q = qdisc_priv(sch);
sch               608 net/sched/sch_fq_codel.c static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
sch               615 net/sched/sch_fq_codel.c static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
sch               618 net/sched/sch_fq_codel.c 	struct fq_codel_sched_data *q = qdisc_priv(sch);
sch               644 net/sched/sch_fq_codel.c 			sch_tree_lock(sch);
sch               650 net/sched/sch_fq_codel.c 			sch_tree_unlock(sch);
sch               662 net/sched/sch_fq_codel.c static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
sch               664 net/sched/sch_fq_codel.c 	struct fq_codel_sched_data *q = qdisc_priv(sch);
sch               676 net/sched/sch_fq_codel.c 		if (arg->fn(sch, i + 1, arg) < 0) {
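
fq_codel_classify() reduces the skb hash modulo the flow count to pick a flow, and fq_codel_drop() reacts to overload by finding the flow with the largest byte backlog and dropping from it. Both ideas in a toy sketch (fixed 8-flow table; classify() and fattest_flow() are hypothetical helpers, not the kernel's):

#include <stdio.h>

#define FLOWS 8

/* Per-flow byte backlog; fq_codel_drop() above searches for the
 * fattest flow and drops a batch of packets from it. */
static unsigned int backlog[FLOWS] = { 40, 10, 90, 5, 90, 0, 30, 20 };

/* Toy hash standing in for the skb hash reduced modulo flows_cnt. */
static unsigned int classify(unsigned int skb_hash)
{
	return skb_hash % FLOWS;
}

static unsigned int fattest_flow(void)
{
	unsigned int idx = 0;

	/* ties resolve to the lowest index, as a plain > comparison does */
	for (unsigned int i = 1; i < FLOWS; i++)
		if (backlog[i] > backlog[idx])
			idx = i;
	return idx;
}

int main(void)
{
	printf("hash 0x1234 -> flow %u\n", classify(0x1234));
	printf("drop from flow %u (backlog %u)\n",
	       fattest_flow(), backlog[fattest_flow()]);
	return 0;
}
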
sch               749 net/sched/sch_generic.c static void pfifo_fast_destroy(struct Qdisc *sch)
sch               751 net/sched/sch_generic.c 	struct pfifo_fast_priv *priv = qdisc_priv(sch);
sch               769 net/sched/sch_generic.c static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
sch               772 net/sched/sch_generic.c 	struct pfifo_fast_priv *priv = qdisc_priv(sch);
sch               807 net/sched/sch_generic.c 	struct Qdisc *sch;
sch               808 net/sched/sch_generic.c 	unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
sch               824 net/sched/sch_generic.c 	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
sch               826 net/sched/sch_generic.c 	if (sch != p) {
sch               832 net/sched/sch_generic.c 		sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
sch               833 net/sched/sch_generic.c 		sch->padded = (char *) sch - (char *) p;
sch               835 net/sched/sch_generic.c 	__skb_queue_head_init(&sch->gso_skb);
sch               836 net/sched/sch_generic.c 	__skb_queue_head_init(&sch->skb_bad_txq);
sch               837 net/sched/sch_generic.c 	qdisc_skb_head_init(&sch->q);
sch               838 net/sched/sch_generic.c 	spin_lock_init(&sch->q.lock);
sch               841 net/sched/sch_generic.c 		sch->cpu_bstats =
sch               843 net/sched/sch_generic.c 		if (!sch->cpu_bstats)
sch               846 net/sched/sch_generic.c 		sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
sch               847 net/sched/sch_generic.c 		if (!sch->cpu_qstats) {
sch               848 net/sched/sch_generic.c 			free_percpu(sch->cpu_bstats);
sch               853 net/sched/sch_generic.c 	spin_lock_init(&sch->busylock);
sch               855 net/sched/sch_generic.c 	spin_lock_init(&sch->seqlock);
sch               856 net/sched/sch_generic.c 	seqcount_init(&sch->running);
sch               858 net/sched/sch_generic.c 	sch->ops = ops;
sch               859 net/sched/sch_generic.c 	sch->flags = ops->static_flags;
sch               860 net/sched/sch_generic.c 	sch->enqueue = ops->enqueue;
sch               861 net/sched/sch_generic.c 	sch->dequeue = ops->dequeue;
sch               862 net/sched/sch_generic.c 	sch->dev_queue = dev_queue;
sch               863 net/sched/sch_generic.c 	sch->empty = true;
sch               865 net/sched/sch_generic.c 	refcount_set(&sch->refcnt, 1);
sch               867 net/sched/sch_generic.c 	if (sch != &noop_qdisc) {
sch               868 net/sched/sch_generic.c 		lockdep_set_class(&sch->busylock, &dev->qdisc_tx_busylock_key);
sch               869 net/sched/sch_generic.c 		lockdep_set_class(&sch->seqlock, &dev->qdisc_tx_busylock_key);
sch               870 net/sched/sch_generic.c 		lockdep_set_class(&sch->running, &dev->qdisc_running_key);
sch               873 net/sched/sch_generic.c 	return sch;
sch               885 net/sched/sch_generic.c 	struct Qdisc *sch;
sch               892 net/sched/sch_generic.c 	sch = qdisc_alloc(dev_queue, ops, extack);
sch               893 net/sched/sch_generic.c 	if (IS_ERR(sch)) {
sch               897 net/sched/sch_generic.c 	sch->parent = parentid;
sch               899 net/sched/sch_generic.c 	if (!ops->init || ops->init(sch, NULL, extack) == 0)
sch               900 net/sched/sch_generic.c 		return sch;
sch               902 net/sched/sch_generic.c 	qdisc_put(sch);
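
qdisc_alloc() above over-allocates, aligns the Qdisc pointer inside the block, and records the offset in sch->padded so the object can later be freed from the aligned pointer. The same trick in plain C, with a stand-in 64-byte ALIGNTO and a toy struct obj in place of struct Qdisc:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define ALIGNTO 64UL	/* stand-in for QDISC_ALIGNTO */
#define ALIGN_UP(x) (((x) + ALIGNTO - 1) & ~(ALIGNTO - 1))

struct obj {
	int padded;	/* bytes between raw allocation and aligned object */
	char payload[100];
};

/* Same shape as qdisc_alloc(): over-allocate, align the object pointer
 * inside the block, and remember the offset for the matching free. */
static struct obj *obj_alloc(void)
{
	void *p = malloc(ALIGN_UP(sizeof(struct obj)) + ALIGNTO);
	struct obj *o;

	if (!p)
		return NULL;
	o = (struct obj *)ALIGN_UP((uintptr_t)p);
	o->padded = (char *)o - (char *)p;
	return o;
}

static void obj_free(struct obj *o)
{
	free((char *)o - o->padded);
}

int main(void)
{
	struct obj *o = obj_alloc();

	if (!o)
		return 1;
	printf("aligned at %p, padding %d bytes\n", (void *)o, o->padded);
	obj_free(o);
	return 0;
}
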
sch                91 net/sched/sch_gred.c static inline int gred_wred_mode_check(struct Qdisc *sch)
sch                93 net/sched/sch_gred.c 	struct gred_sched *table = qdisc_priv(sch);
sch               114 net/sched/sch_gred.c 					struct Qdisc *sch)
sch               117 net/sched/sch_gred.c 		return sch->qstats.backlog;
sch               164 net/sched/sch_gred.c static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch               168 net/sched/sch_gred.c 	struct gred_sched *t = qdisc_priv(sch);
sch               181 net/sched/sch_gred.c 			if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
sch               182 net/sched/sch_gred.c 					sch->limit))
sch               183 net/sched/sch_gred.c 				return qdisc_enqueue_tail(skb, sch);
sch               213 net/sched/sch_gred.c 				     gred_backlog(t, q, sch));
sch               226 net/sched/sch_gred.c 		qdisc_qstats_overlimit(sch);
sch               236 net/sched/sch_gred.c 		qdisc_qstats_overlimit(sch);
sch               246 net/sched/sch_gred.c 	if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
sch               248 net/sched/sch_gred.c 		return qdisc_enqueue_tail(skb, sch);
sch               253 net/sched/sch_gred.c 	return qdisc_drop(skb, sch, to_free);
sch               256 net/sched/sch_gred.c 	qdisc_drop(skb, sch, to_free);
sch               260 net/sched/sch_gred.c static struct sk_buff *gred_dequeue(struct Qdisc *sch)
sch               263 net/sched/sch_gred.c 	struct gred_sched *t = qdisc_priv(sch);
sch               265 net/sched/sch_gred.c 	skb = qdisc_dequeue_head(sch);
sch               278 net/sched/sch_gred.c 				if (!sch->qstats.backlog)
sch               292 net/sched/sch_gred.c static void gred_reset(struct Qdisc *sch)
sch               295 net/sched/sch_gred.c 	struct gred_sched *t = qdisc_priv(sch);
sch               297 net/sched/sch_gred.c 	qdisc_reset_queue(sch);
sch               310 net/sched/sch_gred.c static void gred_offload(struct Qdisc *sch, enum tc_gred_command command)
sch               312 net/sched/sch_gred.c 	struct gred_sched *table = qdisc_priv(sch);
sch               313 net/sched/sch_gred.c 	struct net_device *dev = qdisc_dev(sch);
sch               316 net/sched/sch_gred.c 		.handle		= sch->handle,
sch               317 net/sched/sch_gred.c 		.parent		= sch->parent,
sch               346 net/sched/sch_gred.c 		opt.set.qstats = &sch->qstats;
sch               352 net/sched/sch_gred.c static int gred_offload_dump_stats(struct Qdisc *sch)
sch               354 net/sched/sch_gred.c 	struct gred_sched *table = qdisc_priv(sch);
sch               364 net/sched/sch_gred.c 	hw_stats->handle = sch->handle;
sch               365 net/sched/sch_gred.c 	hw_stats->parent = sch->parent;
sch               371 net/sched/sch_gred.c 	ret = qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_GRED, hw_stats);
sch               382 net/sched/sch_gred.c 		_bstats_update(&sch->bstats,
sch               385 net/sched/sch_gred.c 		sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
sch               386 net/sched/sch_gred.c 		sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
sch               387 net/sched/sch_gred.c 		sch->qstats.drops += hw_stats->stats.qstats[i].drops;
sch               388 net/sched/sch_gred.c 		sch->qstats.requeues += hw_stats->stats.qstats[i].requeues;
sch               389 net/sched/sch_gred.c 		sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
sch               401 net/sched/sch_gred.c static int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps,
sch               404 net/sched/sch_gred.c 	struct gred_sched *table = qdisc_priv(sch);
sch               432 net/sched/sch_gred.c 	sch_tree_lock(sch);
sch               443 net/sched/sch_gred.c 	sch_tree_unlock(sch);
sch               448 net/sched/sch_gred.c 		if (gred_wred_mode_check(sch))
sch               470 net/sched/sch_gred.c 	gred_offload(sch, TC_GRED_REPLACE);
sch               474 net/sched/sch_gred.c static inline int gred_change_vq(struct Qdisc *sch, int dp,
sch               480 net/sched/sch_gred.c 	struct gred_sched *table = qdisc_priv(sch);
sch               498 net/sched/sch_gred.c 	if (ctl->limit > sch->limit)
sch               499 net/sched/sch_gred.c 		q->limit = sch->limit;
sch               635 net/sched/sch_gred.c static int gred_change(struct Qdisc *sch, struct nlattr *opt,
sch               638 net/sched/sch_gred.c 	struct gred_sched *table = qdisc_priv(sch);
sch               656 net/sched/sch_gred.c 			sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
sch               657 net/sched/sch_gred.c 		return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
sch               700 net/sched/sch_gred.c 	sch_tree_lock(sch);
sch               702 net/sched/sch_gred.c 	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc,
sch               712 net/sched/sch_gred.c 		if (gred_wred_mode_check(sch))
sch               716 net/sched/sch_gred.c 	sch_tree_unlock(sch);
sch               719 net/sched/sch_gred.c 	gred_offload(sch, TC_GRED_REPLACE);
sch               723 net/sched/sch_gred.c 	sch_tree_unlock(sch);
sch               728 net/sched/sch_gred.c static int gred_init(struct Qdisc *sch, struct nlattr *opt,
sch               749 net/sched/sch_gred.c 		sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
sch               751 net/sched/sch_gred.c 		sch->limit = qdisc_dev(sch)->tx_queue_len
sch               752 net/sched/sch_gred.c 		             * psched_mtu(qdisc_dev(sch));
sch               754 net/sched/sch_gred.c 	return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
sch               757 net/sched/sch_gred.c static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
sch               759 net/sched/sch_gred.c 	struct gred_sched *table = qdisc_priv(sch);
sch               770 net/sched/sch_gred.c 	if (gred_offload_dump_stats(sch))
sch               787 net/sched/sch_gred.c 	if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
sch               813 net/sched/sch_gred.c 		opt.backlog	= gred_backlog(table, q, sch);
sch               870 net/sched/sch_gred.c 				gred_backlog(table, q, sch)))
sch               900 net/sched/sch_gred.c static void gred_destroy(struct Qdisc *sch)
sch               902 net/sched/sch_gred.c 	struct gred_sched *table = qdisc_priv(sch);
sch               909 net/sched/sch_gred.c 	gred_offload(sch, TC_GRED_DESTROY);
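
gred_enqueue() selects a virtual queue by the packet's drop precedence, and gred_backlog() meters against the shared sch->qstats.backlog in WRED mode but against the VQ's own backlog otherwise. A compact sketch of that selection and limit check, with the RED averaging omitted (struct gred and struct vq here are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define MAX_DPs 16

/* One virtual queue per drop precedence (DP), as in gred_enqueue(). */
struct vq {
	bool in_use;
	unsigned int backlog;
	unsigned int limit;
};

struct gred {
	bool wred_mode;		/* meter against the shared backlog */
	unsigned int backlog;	/* aggregate, i.e. sch->qstats.backlog */
	struct vq vqs[MAX_DPs];
};

/* Mirrors gred_backlog(): WRED mode meters against the shared queue,
 * otherwise against the virtual queue's own backlog. */
static unsigned int gred_backlog(struct gred *t, struct vq *q)
{
	return t->wred_mode ? t->backlog : q->backlog;
}

static bool admit(struct gred *t, unsigned int dp, unsigned int len)
{
	struct vq *q = &t->vqs[dp % MAX_DPs];

	if (!q->in_use)
		return false;	/* unknown DP: kernel falls back to default */
	if (gred_backlog(t, q) + len > q->limit)
		return false;	/* over the VQ limit: drop */
	q->backlog += len;
	t->backlog += len;
	return true;
}

int main(void)
{
	struct gred t = { .wred_mode = false };

	t.vqs[0] = (struct vq){ .in_use = true, .limit = 3000 };
	for (int i = 0; i < 4; i++)
		printf("pkt %d: %s\n", i, admit(&t, 0, 1000) ? "ok" : "drop");
	return 0;
}
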
sch               832 net/sched/sch_hfsc.c qdisc_peek_len(struct Qdisc *sch)
sch               837 net/sched/sch_hfsc.c 	skb = sch->ops->peek(sch);
sch               839 net/sched/sch_hfsc.c 		qdisc_warn_nonwc("qdisc_peek_len", sch);
sch               864 net/sched/sch_hfsc.c hfsc_find_class(u32 classid, struct Qdisc *sch)
sch               866 net/sched/sch_hfsc.c 	struct hfsc_sched *q = qdisc_priv(sch);
sch               913 net/sched/sch_hfsc.c hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
sch               917 net/sched/sch_hfsc.c 	struct hfsc_sched *q = qdisc_priv(sch);
sch               968 net/sched/sch_hfsc.c 						    qdisc_root_sleeping_running(sch),
sch               974 net/sched/sch_hfsc.c 		sch_tree_lock(sch);
sch              1001 net/sched/sch_hfsc.c 		sch_tree_unlock(sch);
sch              1011 net/sched/sch_hfsc.c 		parent = hfsc_find_class(parentid, sch);
sch              1016 net/sched/sch_hfsc.c 	if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
sch              1018 net/sched/sch_hfsc.c 	if (hfsc_find_class(classid, sch))
sch              1028 net/sched/sch_hfsc.c 	err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
sch              1037 net/sched/sch_hfsc.c 					qdisc_root_sleeping_running(sch),
sch              1056 net/sched/sch_hfsc.c 	cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
sch              1066 net/sched/sch_hfsc.c 	sch_tree_lock(sch);
sch              1072 net/sched/sch_hfsc.c 	sch_tree_unlock(sch);
sch              1074 net/sched/sch_hfsc.c 	qdisc_class_hash_grow(sch, &q->clhash);
sch              1081 net/sched/sch_hfsc.c hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
sch              1083 net/sched/sch_hfsc.c 	struct hfsc_sched *q = qdisc_priv(sch);
sch              1093 net/sched/sch_hfsc.c hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
sch              1095 net/sched/sch_hfsc.c 	struct hfsc_sched *q = qdisc_priv(sch);
sch              1101 net/sched/sch_hfsc.c 	sch_tree_lock(sch);
sch              1109 net/sched/sch_hfsc.c 	sch_tree_unlock(sch);
sch              1111 net/sched/sch_hfsc.c 	hfsc_destroy_class(sch, cl);
sch              1116 net/sched/sch_hfsc.c hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
sch              1118 net/sched/sch_hfsc.c 	struct hfsc_sched *q = qdisc_priv(sch);
sch              1124 net/sched/sch_hfsc.c 	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
sch              1125 net/sched/sch_hfsc.c 	    (cl = hfsc_find_class(skb->priority, sch)) != NULL)
sch              1146 net/sched/sch_hfsc.c 			cl = hfsc_find_class(res.classid, sch);
sch              1162 net/sched/sch_hfsc.c 	cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
sch              1170 net/sched/sch_hfsc.c hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
sch              1178 net/sched/sch_hfsc.c 		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
sch              1184 net/sched/sch_hfsc.c 	*old = qdisc_replace(sch, new, &cl->qdisc);
sch              1189 net/sched/sch_hfsc.c hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
sch              1200 net/sched/sch_hfsc.c hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
sch              1213 net/sched/sch_hfsc.c hfsc_search_class(struct Qdisc *sch, u32 classid)
sch              1215 net/sched/sch_hfsc.c 	return (unsigned long)hfsc_find_class(classid, sch);
sch              1219 net/sched/sch_hfsc.c hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
sch              1222 net/sched/sch_hfsc.c 	struct hfsc_class *cl = hfsc_find_class(classid, sch);
sch              1234 net/sched/sch_hfsc.c hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
sch              1241 net/sched/sch_hfsc.c static struct tcf_block *hfsc_tcf_block(struct Qdisc *sch, unsigned long arg,
sch              1244 net/sched/sch_hfsc.c 	struct hfsc_sched *q = qdisc_priv(sch);
sch              1292 net/sched/sch_hfsc.c hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
sch              1317 net/sched/sch_hfsc.c hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
sch              1330 net/sched/sch_hfsc.c 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 ||
sch              1341 net/sched/sch_hfsc.c hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
sch              1343 net/sched/sch_hfsc.c 	struct hfsc_sched *q = qdisc_priv(sch);
sch              1357 net/sched/sch_hfsc.c 			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
sch              1367 net/sched/sch_hfsc.c hfsc_schedule_watchdog(struct Qdisc *sch)
sch              1369 net/sched/sch_hfsc.c 	struct hfsc_sched *q = qdisc_priv(sch);
sch              1385 net/sched/sch_hfsc.c hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
sch              1388 net/sched/sch_hfsc.c 	struct hfsc_sched *q = qdisc_priv(sch);
sch              1392 net/sched/sch_hfsc.c 	qdisc_watchdog_init(&q->watchdog, sch);
sch              1404 net/sched/sch_hfsc.c 	err = tcf_block_get(&q->root.block, &q->root.filter_list, sch, extack);
sch              1408 net/sched/sch_hfsc.c 	q->root.cl_common.classid = sch->handle;
sch              1410 net/sched/sch_hfsc.c 	q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
sch              1411 net/sched/sch_hfsc.c 					  sch->handle, NULL);
sch              1421 net/sched/sch_hfsc.c 	qdisc_class_hash_grow(sch, &q->clhash);
sch              1427 net/sched/sch_hfsc.c hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt,
sch              1430 net/sched/sch_hfsc.c 	struct hfsc_sched *q = qdisc_priv(sch);
sch              1437 net/sched/sch_hfsc.c 	sch_tree_lock(sch);
sch              1439 net/sched/sch_hfsc.c 	sch_tree_unlock(sch);
sch              1475 net/sched/sch_hfsc.c hfsc_reset_qdisc(struct Qdisc *sch)
sch              1477 net/sched/sch_hfsc.c 	struct hfsc_sched *q = qdisc_priv(sch);
sch              1487 net/sched/sch_hfsc.c 	sch->qstats.backlog = 0;
sch              1488 net/sched/sch_hfsc.c 	sch->q.qlen = 0;
sch              1492 net/sched/sch_hfsc.c hfsc_destroy_qdisc(struct Qdisc *sch)
sch              1494 net/sched/sch_hfsc.c 	struct hfsc_sched *q = qdisc_priv(sch);
sch              1508 net/sched/sch_hfsc.c 			hfsc_destroy_class(sch, cl);
sch              1515 net/sched/sch_hfsc.c hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
sch              1517 net/sched/sch_hfsc.c 	struct hfsc_sched *q = qdisc_priv(sch);
sch              1532 net/sched/sch_hfsc.c hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
sch              1539 net/sched/sch_hfsc.c 	cl = hfsc_classify(skb, sch, &err);
sch              1542 net/sched/sch_hfsc.c 			qdisc_qstats_drop(sch);
sch              1552 net/sched/sch_hfsc.c 			qdisc_qstats_drop(sch);
sch              1572 net/sched/sch_hfsc.c 	sch->qstats.backlog += len;
sch              1573 net/sched/sch_hfsc.c 	sch->q.qlen++;
sch              1579 net/sched/sch_hfsc.c hfsc_dequeue(struct Qdisc *sch)
sch              1581 net/sched/sch_hfsc.c 	struct hfsc_sched *q = qdisc_priv(sch);
sch              1588 net/sched/sch_hfsc.c 	if (sch->q.qlen == 0)
sch              1608 net/sched/sch_hfsc.c 			qdisc_qstats_overlimit(sch);
sch              1609 net/sched/sch_hfsc.c 			hfsc_schedule_watchdog(sch);
sch              1639 net/sched/sch_hfsc.c 	qdisc_bstats_update(sch, skb);
sch              1640 net/sched/sch_hfsc.c 	qdisc_qstats_backlog_dec(sch, skb);
sch              1641 net/sched/sch_hfsc.c 	sch->q.qlen--;
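
When hfsc_dequeue() finds no class eligible to send, it records an overlimit and calls hfsc_schedule_watchdog() to wake up at the earliest eligible time. The scan behind that idea, reduced to a sketch (eligible_ns stands in for a class's eligible time; the real code walks an eligible tree, not an array):

#include <stdio.h>

struct class {
	long long eligible_ns;	/* earliest time this class may send */
};

/* Return now if some class can already send, else the earliest future
 * eligible time, which is when the watchdog should fire. */
static long long next_watchdog(struct class *cls, int n, long long now)
{
	long long next = -1;

	for (int i = 0; i < n; i++) {
		if (cls[i].eligible_ns <= now)
			return now;
		if (next < 0 || cls[i].eligible_ns < next)
			next = cls[i].eligible_ns;
	}
	return next;
}

int main(void)
{
	struct class cls[] = { { 500 }, { 300 }, { 900 } };

	printf("arm watchdog at %lld ns\n",
	       next_watchdog(cls, 3, 100));
	return 0;
}
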
sch               247 net/sched/sch_hhf.c static enum wdrr_bucket_idx hhf_classify(struct sk_buff *skb, struct Qdisc *sch)
sch               249 net/sched/sch_hhf.c 	struct hhf_sched_data *q = qdisc_priv(sch);
sch               349 net/sched/sch_hhf.c static unsigned int hhf_drop(struct Qdisc *sch, struct sk_buff **to_free)
sch               351 net/sched/sch_hhf.c 	struct hhf_sched_data *q = qdisc_priv(sch);
sch               362 net/sched/sch_hhf.c 		sch->q.qlen--;
sch               363 net/sched/sch_hhf.c 		qdisc_qstats_backlog_dec(sch, skb);
sch               364 net/sched/sch_hhf.c 		qdisc_drop(skb, sch, to_free);
sch               371 net/sched/sch_hhf.c static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch               374 net/sched/sch_hhf.c 	struct hhf_sched_data *q = qdisc_priv(sch);
sch               379 net/sched/sch_hhf.c 	idx = hhf_classify(skb, sch);
sch               383 net/sched/sch_hhf.c 	qdisc_qstats_backlog_inc(sch, skb);
sch               402 net/sched/sch_hhf.c 	if (++sch->q.qlen <= sch->limit)
sch               405 net/sched/sch_hhf.c 	prev_backlog = sch->qstats.backlog;
sch               410 net/sched/sch_hhf.c 	if (hhf_drop(sch, to_free) == idx)
sch               414 net/sched/sch_hhf.c 	qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
sch               418 net/sched/sch_hhf.c static struct sk_buff *hhf_dequeue(struct Qdisc *sch)
sch               420 net/sched/sch_hhf.c 	struct hhf_sched_data *q = qdisc_priv(sch);
sch               445 net/sched/sch_hhf.c 		sch->q.qlen--;
sch               446 net/sched/sch_hhf.c 		qdisc_qstats_backlog_dec(sch, skb);
sch               457 net/sched/sch_hhf.c 	qdisc_bstats_update(sch, skb);
sch               463 net/sched/sch_hhf.c static void hhf_reset(struct Qdisc *sch)
sch               467 net/sched/sch_hhf.c 	while ((skb = hhf_dequeue(sch)) != NULL)
sch               471 net/sched/sch_hhf.c static void hhf_destroy(struct Qdisc *sch)
sch               474 net/sched/sch_hhf.c 	struct hhf_sched_data *q = qdisc_priv(sch);
sch               508 net/sched/sch_hhf.c static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
sch               511 net/sched/sch_hhf.c 	struct hhf_sched_data *q = qdisc_priv(sch);
sch               537 net/sched/sch_hhf.c 	sch_tree_lock(sch);
sch               540 net/sched/sch_hhf.c 		sch->limit = nla_get_u32(tb[TCA_HHF_BACKLOG_LIMIT]);
sch               563 net/sched/sch_hhf.c 	qlen = sch->q.qlen;
sch               564 net/sched/sch_hhf.c 	prev_backlog = sch->qstats.backlog;
sch               565 net/sched/sch_hhf.c 	while (sch->q.qlen > sch->limit) {
sch               566 net/sched/sch_hhf.c 		struct sk_buff *skb = hhf_dequeue(sch);
sch               570 net/sched/sch_hhf.c 	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
sch               571 net/sched/sch_hhf.c 				  prev_backlog - sch->qstats.backlog);
sch               573 net/sched/sch_hhf.c 	sch_tree_unlock(sch);
sch               577 net/sched/sch_hhf.c static int hhf_init(struct Qdisc *sch, struct nlattr *opt,
sch               580 net/sched/sch_hhf.c 	struct hhf_sched_data *q = qdisc_priv(sch);
sch               583 net/sched/sch_hhf.c 	sch->limit = 1000;
sch               584 net/sched/sch_hhf.c 	q->quantum = psched_mtu(qdisc_dev(sch));
sch               596 net/sched/sch_hhf.c 		int err = hhf_change(sch, opt, extack);
sch               654 net/sched/sch_hhf.c static int hhf_dump(struct Qdisc *sch, struct sk_buff *skb)
sch               656 net/sched/sch_hhf.c 	struct hhf_sched_data *q = qdisc_priv(sch);
sch               663 net/sched/sch_hhf.c 	if (nla_put_u32(skb, TCA_HHF_BACKLOG_LIMIT, sch->limit) ||
sch               680 net/sched/sch_hhf.c static int hhf_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
sch               682 net/sched/sch_hhf.c 	struct hhf_sched_data *q = qdisc_priv(sch);
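
hhf_classify() steers each packet into one of two WDRR buckets: flows whose byte count crosses an admit threshold are treated as heavy hitters and isolated from everything else. A toy single-stage version of that decision (the kernel uses a multi-stage filter with decaying counters; classify() and the table size here are illustrative):

#include <stdio.h>

#define TABLE 32

enum bucket { BUCKET_HH, BUCKET_NON_HH };

/* Toy stand-in for hhf's heavy-hitter filter: count bytes per hash
 * slot and call the flow a heavy hitter once it crosses admit_bytes. */
static unsigned long long counters[TABLE];

static enum bucket classify(unsigned int hash, unsigned int len,
			    unsigned long long admit_bytes)
{
	counters[hash % TABLE] += len;
	return counters[hash % TABLE] > admit_bytes ?
		BUCKET_HH : BUCKET_NON_HH;
}

int main(void)
{
	/* the same flow becomes a heavy hitter on its third packet */
	for (int i = 0; i < 5; i++)
		printf("pkt %d -> bucket %d\n", i,
		       classify(7, 1500, 4000));
	return 0;
}
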
sch               180 net/sched/sch_htb.c static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
sch               182 net/sched/sch_htb.c 	struct htb_sched *q = qdisc_priv(sch);
sch               191 net/sched/sch_htb.c static unsigned long htb_search(struct Qdisc *sch, u32 handle)
sch               193 net/sched/sch_htb.c 	return (unsigned long)htb_find(handle, sch);
sch               209 net/sched/sch_htb.c static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
sch               212 net/sched/sch_htb.c 	struct htb_sched *q = qdisc_priv(sch);
sch               222 net/sched/sch_htb.c 	if (skb->priority == sch->handle)
sch               224 net/sched/sch_htb.c 	cl = htb_find(skb->priority, sch);
sch               249 net/sched/sch_htb.c 			if (res.classid == sch->handle)
sch               251 net/sched/sch_htb.c 			cl = htb_find(res.classid, sch);
sch               262 net/sched/sch_htb.c 	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
sch               579 net/sched/sch_htb.c static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch               584 net/sched/sch_htb.c 	struct htb_sched *q = qdisc_priv(sch);
sch               585 net/sched/sch_htb.c 	struct htb_class *cl = htb_classify(skb, sch, &ret);
sch               593 net/sched/sch_htb.c 			return qdisc_drop(skb, sch, to_free);
sch               598 net/sched/sch_htb.c 			qdisc_qstats_drop(sch);
sch               605 net/sched/sch_htb.c 			qdisc_qstats_drop(sch);
sch               613 net/sched/sch_htb.c 	sch->qstats.backlog += len;
sch               614 net/sched/sch_htb.c 	sch->q.qlen++;
sch               889 net/sched/sch_htb.c static struct sk_buff *htb_dequeue(struct Qdisc *sch)
sch               892 net/sched/sch_htb.c 	struct htb_sched *q = qdisc_priv(sch);
sch               901 net/sched/sch_htb.c 		qdisc_bstats_update(sch, skb);
sch               902 net/sched/sch_htb.c 		qdisc_qstats_backlog_dec(sch, skb);
sch               903 net/sched/sch_htb.c 		sch->q.qlen--;
sch               907 net/sched/sch_htb.c 	if (!sch->q.qlen)
sch               949 net/sched/sch_htb.c static void htb_reset(struct Qdisc *sch)
sch               951 net/sched/sch_htb.c 	struct htb_sched *q = qdisc_priv(sch);
sch               969 net/sched/sch_htb.c 	sch->q.qlen = 0;
sch               970 net/sched/sch_htb.c 	sch->qstats.backlog = 0;
sch               988 net/sched/sch_htb.c 	struct Qdisc *sch = q->watchdog.qdisc;
sch               991 net/sched/sch_htb.c 	__netif_schedule(qdisc_root(sch));
sch               995 net/sched/sch_htb.c static int htb_init(struct Qdisc *sch, struct nlattr *opt,
sch               998 net/sched/sch_htb.c 	struct htb_sched *q = qdisc_priv(sch);
sch              1003 net/sched/sch_htb.c 	qdisc_watchdog_init(&q->watchdog, sch);
sch              1009 net/sched/sch_htb.c 	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
sch              1034 net/sched/sch_htb.c 		q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
sch              1043 net/sched/sch_htb.c static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
sch              1045 net/sched/sch_htb.c 	struct htb_sched *q = qdisc_priv(sch);
sch              1049 net/sched/sch_htb.c 	sch->qstats.overlimits = q->overlimits;
sch              1074 net/sched/sch_htb.c static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
sch              1121 net/sched/sch_htb.c htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
sch              1138 net/sched/sch_htb.c 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
sch              1147 net/sched/sch_htb.c static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
sch              1155 net/sched/sch_htb.c 	    (new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
sch              1159 net/sched/sch_htb.c 	*old = qdisc_replace(sch, new, &cl->leaf.q);
sch              1163 net/sched/sch_htb.c static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
sch              1169 net/sched/sch_htb.c static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
sch              1173 net/sched/sch_htb.c 	htb_deactivate(qdisc_priv(sch), cl);
sch              1207 net/sched/sch_htb.c static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
sch              1218 net/sched/sch_htb.c static void htb_destroy(struct Qdisc *sch)
sch              1220 net/sched/sch_htb.c 	struct htb_sched *q = qdisc_priv(sch);
sch              1243 net/sched/sch_htb.c 			htb_destroy_class(sch, cl);
sch              1249 net/sched/sch_htb.c static int htb_delete(struct Qdisc *sch, unsigned long arg)
sch              1251 net/sched/sch_htb.c 	struct htb_sched *q = qdisc_priv(sch);
sch              1264 net/sched/sch_htb.c 		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
sch              1270 net/sched/sch_htb.c 	sch_tree_lock(sch);
sch              1290 net/sched/sch_htb.c 	sch_tree_unlock(sch);
sch              1292 net/sched/sch_htb.c 	htb_destroy_class(sch, cl);
sch              1296 net/sched/sch_htb.c static int htb_change_class(struct Qdisc *sch, u32 classid,
sch              1301 net/sched/sch_htb.c 	struct htb_sched *q = qdisc_priv(sch);
sch              1323 net/sched/sch_htb.c 	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);
sch              1357 net/sched/sch_htb.c 		if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
sch              1358 net/sched/sch_htb.c 		    htb_find(classid, sch))
sch              1371 net/sched/sch_htb.c 		err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
sch              1380 net/sched/sch_htb.c 						qdisc_root_sleeping_running(sch),
sch              1399 net/sched/sch_htb.c 		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
sch              1401 net/sched/sch_htb.c 		sch_tree_lock(sch);
sch              1442 net/sched/sch_htb.c 						    qdisc_root_sleeping_running(sch),
sch              1447 net/sched/sch_htb.c 		sch_tree_lock(sch);
sch              1483 net/sched/sch_htb.c 	sch_tree_unlock(sch);
sch              1490 net/sched/sch_htb.c 	qdisc_class_hash_grow(sch, &q->clhash);
sch              1499 net/sched/sch_htb.c static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg,
sch              1502 net/sched/sch_htb.c 	struct htb_sched *q = qdisc_priv(sch);
sch              1508 net/sched/sch_htb.c static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
sch              1511 net/sched/sch_htb.c 	struct htb_class *cl = htb_find(classid, sch);
sch              1527 net/sched/sch_htb.c static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
sch              1535 net/sched/sch_htb.c static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
sch              1537 net/sched/sch_htb.c 	struct htb_sched *q = qdisc_priv(sch);
sch              1550 net/sched/sch_htb.c 			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
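
htb_classify() resolves a class in stages: skb->priority equal to the qdisc handle means the direct queue, a priority naming an existing class under this qdisc selects it, tc filters are consulted next, and q->defcls is the fallback. A sketch of that chain with the filter step elided (and without checking that the named class actually exists, which htb_find() does):

#include <stdio.h>

#define TC_H_MAJ(h)	((h) & 0xFFFF0000U)
#define TC_H_MIN(h)	((h) & 0x0000FFFFU)
#define TC_H_MAKE(maj, min) (TC_H_MAJ(maj) | TC_H_MIN(min))

/* Skeleton of htb_classify()'s decision chain. */
static unsigned int classify(unsigned int skb_priority,
			     unsigned int sch_handle, unsigned int defcls)
{
	if (skb_priority == sch_handle)
		return 0;			/* direct queue */
	if (TC_H_MAJ(skb_priority) == TC_H_MAJ(sch_handle) &&
	    TC_H_MIN(skb_priority))
		return skb_priority;		/* explicit class id */
	return TC_H_MAKE(sch_handle, defcls);	/* default class */
}

int main(void)
{
	unsigned int handle = TC_H_MAKE(0x10000U, 0);

	printf("0x%x\n", classify(handle, handle, 0x30));   /* direct */
	printf("0x%x\n", classify(0x10020U, handle, 0x30)); /* 1:20   */
	printf("0x%x\n", classify(0, handle, 0x30));        /* 1:30   */
	return 0;
}
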
sch                23 net/sched/sch_ingress.c static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg)
sch                28 net/sched/sch_ingress.c static unsigned long ingress_find(struct Qdisc *sch, u32 classid)
sch                33 net/sched/sch_ingress.c static unsigned long ingress_bind_filter(struct Qdisc *sch,
sch                36 net/sched/sch_ingress.c 	return ingress_find(sch, classid);
sch                39 net/sched/sch_ingress.c static void ingress_unbind_filter(struct Qdisc *sch, unsigned long cl)
sch                43 net/sched/sch_ingress.c static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker)
sch                47 net/sched/sch_ingress.c static struct tcf_block *ingress_tcf_block(struct Qdisc *sch, unsigned long cl,
sch                50 net/sched/sch_ingress.c 	struct ingress_sched_data *q = qdisc_priv(sch);
sch                62 net/sched/sch_ingress.c static void ingress_ingress_block_set(struct Qdisc *sch, u32 block_index)
sch                64 net/sched/sch_ingress.c 	struct ingress_sched_data *q = qdisc_priv(sch);
sch                69 net/sched/sch_ingress.c static u32 ingress_ingress_block_get(struct Qdisc *sch)
sch                71 net/sched/sch_ingress.c 	struct ingress_sched_data *q = qdisc_priv(sch);
sch                76 net/sched/sch_ingress.c static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
sch                79 net/sched/sch_ingress.c 	struct ingress_sched_data *q = qdisc_priv(sch);
sch                80 net/sched/sch_ingress.c 	struct net_device *dev = qdisc_dev(sch);
sch                84 net/sched/sch_ingress.c 	mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress);
sch                90 net/sched/sch_ingress.c 	return tcf_block_get_ext(&q->block, sch, &q->block_info, extack);
sch                93 net/sched/sch_ingress.c static void ingress_destroy(struct Qdisc *sch)
sch                95 net/sched/sch_ingress.c 	struct ingress_sched_data *q = qdisc_priv(sch);
sch                97 net/sched/sch_ingress.c 	tcf_block_put_ext(q->block, sch, &q->block_info);
sch               101 net/sched/sch_ingress.c static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb)
sch               148 net/sched/sch_ingress.c static unsigned long clsact_find(struct Qdisc *sch, u32 classid)
sch               159 net/sched/sch_ingress.c static unsigned long clsact_bind_filter(struct Qdisc *sch,
sch               162 net/sched/sch_ingress.c 	return clsact_find(sch, classid);
sch               165 net/sched/sch_ingress.c static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl,
sch               168 net/sched/sch_ingress.c 	struct clsact_sched_data *q = qdisc_priv(sch);
sch               180 net/sched/sch_ingress.c static void clsact_ingress_block_set(struct Qdisc *sch, u32 block_index)
sch               182 net/sched/sch_ingress.c 	struct clsact_sched_data *q = qdisc_priv(sch);
sch               187 net/sched/sch_ingress.c static void clsact_egress_block_set(struct Qdisc *sch, u32 block_index)
sch               189 net/sched/sch_ingress.c 	struct clsact_sched_data *q = qdisc_priv(sch);
sch               194 net/sched/sch_ingress.c static u32 clsact_ingress_block_get(struct Qdisc *sch)
sch               196 net/sched/sch_ingress.c 	struct clsact_sched_data *q = qdisc_priv(sch);
sch               201 net/sched/sch_ingress.c static u32 clsact_egress_block_get(struct Qdisc *sch)
sch               203 net/sched/sch_ingress.c 	struct clsact_sched_data *q = qdisc_priv(sch);
sch               208 net/sched/sch_ingress.c static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
sch               211 net/sched/sch_ingress.c 	struct clsact_sched_data *q = qdisc_priv(sch);
sch               212 net/sched/sch_ingress.c 	struct net_device *dev = qdisc_dev(sch);
sch               218 net/sched/sch_ingress.c 	mini_qdisc_pair_init(&q->miniqp_ingress, sch, &dev->miniq_ingress);
sch               224 net/sched/sch_ingress.c 	err = tcf_block_get_ext(&q->ingress_block, sch, &q->ingress_block_info,
sch               229 net/sched/sch_ingress.c 	mini_qdisc_pair_init(&q->miniqp_egress, sch, &dev->miniq_egress);
sch               235 net/sched/sch_ingress.c 	return tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info, extack);
sch               238 net/sched/sch_ingress.c static void clsact_destroy(struct Qdisc *sch)
sch               240 net/sched/sch_ingress.c 	struct clsact_sched_data *q = qdisc_priv(sch);
sch               242 net/sched/sch_ingress.c 	tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info);
sch               243 net/sched/sch_ingress.c 	tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info);
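
ingress and clsact keep no queues at all; their init functions only bind tcf blocks, and their classes are fixed pseudo-classes. clsact_find() accepts exactly the ingress and egress minor IDs and rejects everything else, roughly as below (MIN_INGRESS and MIN_EGRESS are stand-ins for the uapi TC_H_MIN_INGRESS/TC_H_MIN_EGRESS values):

#include <stdio.h>

#define TC_H_MIN(h)	((h) & 0x0000FFFFU)
#define MIN_INGRESS	0xFFF2U		/* stand-in for TC_H_MIN_INGRESS */
#define MIN_EGRESS	0xFFF3U		/* stand-in for TC_H_MIN_EGRESS */

/* clsact exposes exactly two pseudo-classes; anything else does not
 * exist. This mirrors the shape of clsact_find() above. */
static unsigned long find(unsigned int classid)
{
	switch (TC_H_MIN(classid)) {
	case MIN_INGRESS:
	case MIN_EGRESS:
		return TC_H_MIN(classid);
	default:
		return 0;	/* no such class */
	}
}

int main(void)
{
	printf("ingress -> %lu\n", find(MIN_INGRESS));
	printf("other   -> %lu\n", find(0x10U));
	return 0;
}
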
sch                24 net/sched/sch_mq.c static int mq_offload(struct Qdisc *sch, enum tc_mq_command cmd)
sch                26 net/sched/sch_mq.c 	struct net_device *dev = qdisc_dev(sch);
sch                29 net/sched/sch_mq.c 		.handle = sch->handle,
sch                38 net/sched/sch_mq.c static int mq_offload_stats(struct Qdisc *sch)
sch                42 net/sched/sch_mq.c 		.handle = sch->handle,
sch                44 net/sched/sch_mq.c 			.bstats = &sch->bstats,
sch                45 net/sched/sch_mq.c 			.qstats = &sch->qstats,
sch                49 net/sched/sch_mq.c 	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_MQ, &opt);
sch                52 net/sched/sch_mq.c static void mq_destroy(struct Qdisc *sch)
sch                54 net/sched/sch_mq.c 	struct net_device *dev = qdisc_dev(sch);
sch                55 net/sched/sch_mq.c 	struct mq_sched *priv = qdisc_priv(sch);
sch                58 net/sched/sch_mq.c 	mq_offload(sch, TC_MQ_DESTROY);
sch                67 net/sched/sch_mq.c static int mq_init(struct Qdisc *sch, struct nlattr *opt,
sch                70 net/sched/sch_mq.c 	struct net_device *dev = qdisc_dev(sch);
sch                71 net/sched/sch_mq.c 	struct mq_sched *priv = qdisc_priv(sch);
sch                76 net/sched/sch_mq.c 	if (sch->parent != TC_H_ROOT)
sch                91 net/sched/sch_mq.c 					  TC_H_MAKE(TC_H_MAJ(sch->handle),
sch               100 net/sched/sch_mq.c 	sch->flags |= TCQ_F_MQROOT;
sch               102 net/sched/sch_mq.c 	mq_offload(sch, TC_MQ_CREATE);
sch               106 net/sched/sch_mq.c static void mq_attach(struct Qdisc *sch)
sch               108 net/sched/sch_mq.c 	struct net_device *dev = qdisc_dev(sch);
sch               109 net/sched/sch_mq.c 	struct mq_sched *priv = qdisc_priv(sch);
sch               128 net/sched/sch_mq.c static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
sch               130 net/sched/sch_mq.c 	struct net_device *dev = qdisc_dev(sch);
sch               135 net/sched/sch_mq.c 	sch->q.qlen = 0;
sch               136 net/sched/sch_mq.c 	memset(&sch->bstats, 0, sizeof(sch->bstats));
sch               137 net/sched/sch_mq.c 	memset(&sch->qstats, 0, sizeof(sch->qstats));
sch               150 net/sched/sch_mq.c 			__gnet_stats_copy_basic(NULL, &sch->bstats,
sch               153 net/sched/sch_mq.c 			__gnet_stats_copy_queue(&sch->qstats,
sch               156 net/sched/sch_mq.c 			sch->q.qlen		+= qlen;
sch               158 net/sched/sch_mq.c 			sch->q.qlen		+= qdisc->q.qlen;
sch               159 net/sched/sch_mq.c 			sch->bstats.bytes	+= qdisc->bstats.bytes;
sch               160 net/sched/sch_mq.c 			sch->bstats.packets	+= qdisc->bstats.packets;
sch               161 net/sched/sch_mq.c 			sch->qstats.qlen	+= qdisc->qstats.qlen;
sch               162 net/sched/sch_mq.c 			sch->qstats.backlog	+= qdisc->qstats.backlog;
sch               163 net/sched/sch_mq.c 			sch->qstats.drops	+= qdisc->qstats.drops;
sch               164 net/sched/sch_mq.c 			sch->qstats.requeues	+= qdisc->qstats.requeues;
sch               165 net/sched/sch_mq.c 			sch->qstats.overlimits	+= qdisc->qstats.overlimits;
sch               171 net/sched/sch_mq.c 	return mq_offload_stats(sch);
sch               174 net/sched/sch_mq.c static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl)
sch               176 net/sched/sch_mq.c 	struct net_device *dev = qdisc_dev(sch);
sch               184 net/sched/sch_mq.c static struct netdev_queue *mq_select_queue(struct Qdisc *sch,
sch               187 net/sched/sch_mq.c 	return mq_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
sch               190 net/sched/sch_mq.c static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
sch               193 net/sched/sch_mq.c 	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
sch               195 net/sched/sch_mq.c 	struct net_device *dev = qdisc_dev(sch);
sch               206 net/sched/sch_mq.c 	graft_offload.handle = sch->handle;
sch               211 net/sched/sch_mq.c 	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, *old,
sch               216 net/sched/sch_mq.c static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl)
sch               218 net/sched/sch_mq.c 	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
sch               223 net/sched/sch_mq.c static unsigned long mq_find(struct Qdisc *sch, u32 classid)
sch               227 net/sched/sch_mq.c 	if (!mq_queue_get(sch, ntx))
sch               232 net/sched/sch_mq.c static int mq_dump_class(struct Qdisc *sch, unsigned long cl,
sch               235 net/sched/sch_mq.c 	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
sch               243 net/sched/sch_mq.c static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
sch               246 net/sched/sch_mq.c 	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
sch               248 net/sched/sch_mq.c 	sch = dev_queue->qdisc_sleeping;
sch               249 net/sched/sch_mq.c 	if (gnet_stats_copy_basic(&sch->running, d, sch->cpu_bstats,
sch               250 net/sched/sch_mq.c 				  &sch->bstats) < 0 ||
sch               251 net/sched/sch_mq.c 	    qdisc_qstats_copy(d, sch) < 0)
sch               256 net/sched/sch_mq.c static void mq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
sch               258 net/sched/sch_mq.c 	struct net_device *dev = qdisc_dev(sch);
sch               266 net/sched/sch_mq.c 		if (arg->fn(sch, ntx + 1, arg) < 0) {
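
mq owns no packets itself, so mq_dump() zeroes the root counters and rebuilds them by summing every TX queue's child qdisc, per the lines above. The aggregation step in isolation (struct qstats here is a cut-down illustration, not the kernel's gnet_stats_queue):

#include <stdio.h>

struct qstats {
	unsigned long long bytes, packets, drops;
};

/* Rebuild the root's view by summing each TX queue's child, the same
 * idea as the loop in mq_dump(). */
static struct qstats mq_sum(const struct qstats *per_txq, int num_tx)
{
	struct qstats total = { 0 };

	for (int i = 0; i < num_tx; i++) {
		total.bytes   += per_txq[i].bytes;
		total.packets += per_txq[i].packets;
		total.drops   += per_txq[i].drops;
	}
	return total;
}

int main(void)
{
	struct qstats txq[2] = { { 1000, 10, 1 }, { 500, 5, 0 } };
	struct qstats t = mq_sum(txq, 2);

	printf("bytes=%llu packets=%llu drops=%llu\n",
	       t.bytes, t.packets, t.drops);
	return 0;
}
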
sch                30 net/sched/sch_mqprio.c static void mqprio_destroy(struct Qdisc *sch)
sch                32 net/sched/sch_mqprio.c 	struct net_device *dev = qdisc_dev(sch);
sch                33 net/sched/sch_mqprio.c 	struct mqprio_sched *priv = qdisc_priv(sch);
sch               133 net/sched/sch_mqprio.c static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
sch               136 net/sched/sch_mqprio.c 	struct net_device *dev = qdisc_dev(sch);
sch               137 net/sched/sch_mqprio.c 	struct mqprio_sched *priv = qdisc_priv(sch);
sch               150 net/sched/sch_mqprio.c 	if (sch->parent != TC_H_ROOT)
sch               230 net/sched/sch_mqprio.c 					  TC_H_MAKE(TC_H_MAJ(sch->handle),
sch               285 net/sched/sch_mqprio.c 	sch->flags |= TCQ_F_MQROOT;
sch               289 net/sched/sch_mqprio.c static void mqprio_attach(struct Qdisc *sch)
sch               291 net/sched/sch_mqprio.c 	struct net_device *dev = qdisc_dev(sch);
sch               292 net/sched/sch_mqprio.c 	struct mqprio_sched *priv = qdisc_priv(sch);
sch               309 net/sched/sch_mqprio.c static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
sch               312 net/sched/sch_mqprio.c 	struct net_device *dev = qdisc_dev(sch);
sch               320 net/sched/sch_mqprio.c static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
sch               323 net/sched/sch_mqprio.c 	struct net_device *dev = qdisc_dev(sch);
sch               324 net/sched/sch_mqprio.c 	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
sch               383 net/sched/sch_mqprio.c static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
sch               385 net/sched/sch_mqprio.c 	struct net_device *dev = qdisc_dev(sch);
sch               386 net/sched/sch_mqprio.c 	struct mqprio_sched *priv = qdisc_priv(sch);
sch               392 net/sched/sch_mqprio.c 	sch->q.qlen = 0;
sch               393 net/sched/sch_mqprio.c 	memset(&sch->bstats, 0, sizeof(sch->bstats));
sch               394 net/sched/sch_mqprio.c 	memset(&sch->qstats, 0, sizeof(sch->qstats));
sch               408 net/sched/sch_mqprio.c 			__gnet_stats_copy_basic(NULL, &sch->bstats,
sch               411 net/sched/sch_mqprio.c 			__gnet_stats_copy_queue(&sch->qstats,
sch               414 net/sched/sch_mqprio.c 			sch->q.qlen		+= qlen;
sch               416 net/sched/sch_mqprio.c 			sch->q.qlen		+= qdisc->q.qlen;
sch               417 net/sched/sch_mqprio.c 			sch->bstats.bytes	+= qdisc->bstats.bytes;
sch               418 net/sched/sch_mqprio.c 			sch->bstats.packets	+= qdisc->bstats.packets;
sch               419 net/sched/sch_mqprio.c 			sch->qstats.backlog	+= qdisc->qstats.backlog;
sch               420 net/sched/sch_mqprio.c 			sch->qstats.drops	+= qdisc->qstats.drops;
sch               421 net/sched/sch_mqprio.c 			sch->qstats.requeues	+= qdisc->qstats.requeues;
sch               422 net/sched/sch_mqprio.c 			sch->qstats.overlimits	+= qdisc->qstats.overlimits;
sch               459 net/sched/sch_mqprio.c static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
sch               461 net/sched/sch_mqprio.c 	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
sch               469 net/sched/sch_mqprio.c static unsigned long mqprio_find(struct Qdisc *sch, u32 classid)
sch               471 net/sched/sch_mqprio.c 	struct net_device *dev = qdisc_dev(sch);
sch               488 net/sched/sch_mqprio.c static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
sch               492 net/sched/sch_mqprio.c 		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
sch               493 net/sched/sch_mqprio.c 		struct net_device *dev = qdisc_dev(sch);
sch               497 net/sched/sch_mqprio.c 			TC_H_MAKE(TC_H_MAJ(sch->handle),
sch               508 net/sched/sch_mqprio.c static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
sch               518 net/sched/sch_mqprio.c 		struct net_device *dev = qdisc_dev(sch);
sch               542 net/sched/sch_mqprio.c 			__gnet_stats_copy_basic(NULL, &sch->bstats,
sch               544 net/sched/sch_mqprio.c 			__gnet_stats_copy_queue(&sch->qstats,
sch               558 net/sched/sch_mqprio.c 		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
sch               560 net/sched/sch_mqprio.c 		sch = dev_queue->qdisc_sleeping;
sch               561 net/sched/sch_mqprio.c 		if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d,
sch               562 net/sched/sch_mqprio.c 					  sch->cpu_bstats, &sch->bstats) < 0 ||
sch               563 net/sched/sch_mqprio.c 		    qdisc_qstats_copy(d, sch) < 0)
sch               569 net/sched/sch_mqprio.c static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
sch               571 net/sched/sch_mqprio.c 	struct net_device *dev = qdisc_dev(sch);
sch               580 net/sched/sch_mqprio.c 		if (arg->fn(sch, ntx + TC_H_MIN_PRIORITY, arg) < 0) {
sch               595 net/sched/sch_mqprio.c 		if (arg->fn(sch, ntx + 1, arg) < 0) {
sch               603 net/sched/sch_mqprio.c static struct netdev_queue *mqprio_select_queue(struct Qdisc *sch,
sch               606 net/sched/sch_mqprio.c 	return mqprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
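
The mqprio_dump() lines above show the root qdisc zeroing its own counters and then folding each per-queue child's qlen, bstats and qstats into them. A compilable user-space model of that aggregation step, with hypothetical mini_qdisc/mini_stats types standing in for the kernel structures:

#include <stdio.h>

struct mini_stats { unsigned long bytes, packets, drops, backlog; };
struct mini_qdisc { unsigned int qlen; struct mini_stats st; };

static void aggregate(struct mini_qdisc *root,
                      const struct mini_qdisc *child, int n)
{
    root->qlen = 0;
    root->st = (struct mini_stats){0};      /* zero, like the memset()s above */
    for (int i = 0; i < n; i++) {
        root->qlen       += child[i].qlen;
        root->st.bytes   += child[i].st.bytes;
        root->st.packets += child[i].st.packets;
        root->st.drops   += child[i].st.drops;
        root->st.backlog += child[i].st.backlog;
    }
}

int main(void)
{
    struct mini_qdisc ch[2] = {
        { .qlen = 3, .st = { .bytes = 4500, .packets = 3 } },
        { .qlen = 1, .st = { .bytes = 1500, .packets = 1 } },
    };
    struct mini_qdisc root;

    aggregate(&root, ch, 2);
    printf("qlen=%u bytes=%lu packets=%lu\n",
           root.qlen, root.st.bytes, root.st.packets);
    return 0;
}
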
sch                30 net/sched/sch_multiq.c multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
sch                32 net/sched/sch_multiq.c 	struct multiq_sched_data *q = qdisc_priv(sch);
sch                60 net/sched/sch_multiq.c multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch                66 net/sched/sch_multiq.c 	qdisc = multiq_classify(skb, sch, &ret);
sch                71 net/sched/sch_multiq.c 			qdisc_qstats_drop(sch);
sch                79 net/sched/sch_multiq.c 		sch->q.qlen++;
sch                83 net/sched/sch_multiq.c 		qdisc_qstats_drop(sch);
sch                87 net/sched/sch_multiq.c static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
sch                89 net/sched/sch_multiq.c 	struct multiq_sched_data *q = qdisc_priv(sch);
sch               104 net/sched/sch_multiq.c 		    netdev_get_tx_queue(qdisc_dev(sch), q->curband))) {
sch               108 net/sched/sch_multiq.c 				qdisc_bstats_update(sch, skb);
sch               109 net/sched/sch_multiq.c 				sch->q.qlen--;
sch               118 net/sched/sch_multiq.c static struct sk_buff *multiq_peek(struct Qdisc *sch)
sch               120 net/sched/sch_multiq.c 	struct multiq_sched_data *q = qdisc_priv(sch);
sch               136 net/sched/sch_multiq.c 		    netdev_get_tx_queue(qdisc_dev(sch), curband))) {
sch               148 net/sched/sch_multiq.c multiq_reset(struct Qdisc *sch)
sch               151 net/sched/sch_multiq.c 	struct multiq_sched_data *q = qdisc_priv(sch);
sch               155 net/sched/sch_multiq.c 	sch->q.qlen = 0;
sch               160 net/sched/sch_multiq.c multiq_destroy(struct Qdisc *sch)
sch               163 net/sched/sch_multiq.c 	struct multiq_sched_data *q = qdisc_priv(sch);
sch               172 net/sched/sch_multiq.c static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
sch               175 net/sched/sch_multiq.c 	struct multiq_sched_data *q = qdisc_priv(sch);
sch               180 net/sched/sch_multiq.c 	if (!netif_is_multiqueue(qdisc_dev(sch)))
sch               187 net/sched/sch_multiq.c 	qopt->bands = qdisc_dev(sch)->real_num_tx_queues;
sch               194 net/sched/sch_multiq.c 	sch_tree_lock(sch);
sch               206 net/sched/sch_multiq.c 	sch_tree_unlock(sch);
sch               215 net/sched/sch_multiq.c 			child = qdisc_create_dflt(sch->dev_queue,
sch               217 net/sched/sch_multiq.c 						  TC_H_MAKE(sch->handle,
sch               220 net/sched/sch_multiq.c 				sch_tree_lock(sch);
sch               228 net/sched/sch_multiq.c 				sch_tree_unlock(sch);
sch               236 net/sched/sch_multiq.c static int multiq_init(struct Qdisc *sch, struct nlattr *opt,
sch               239 net/sched/sch_multiq.c 	struct multiq_sched_data *q = qdisc_priv(sch);
sch               247 net/sched/sch_multiq.c 	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
sch               251 net/sched/sch_multiq.c 	q->max_bands = qdisc_dev(sch)->num_tx_queues;
sch               259 net/sched/sch_multiq.c 	return multiq_tune(sch, opt, extack);
sch               262 net/sched/sch_multiq.c static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
sch               264 net/sched/sch_multiq.c 	struct multiq_sched_data *q = qdisc_priv(sch);
sch               281 net/sched/sch_multiq.c static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
sch               284 net/sched/sch_multiq.c 	struct multiq_sched_data *q = qdisc_priv(sch);
sch               290 net/sched/sch_multiq.c 	*old = qdisc_replace(sch, new, &q->queues[band]);
sch               295 net/sched/sch_multiq.c multiq_leaf(struct Qdisc *sch, unsigned long arg)
sch               297 net/sched/sch_multiq.c 	struct multiq_sched_data *q = qdisc_priv(sch);
sch               303 net/sched/sch_multiq.c static unsigned long multiq_find(struct Qdisc *sch, u32 classid)
sch               305 net/sched/sch_multiq.c 	struct multiq_sched_data *q = qdisc_priv(sch);
sch               313 net/sched/sch_multiq.c static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent,
sch               316 net/sched/sch_multiq.c 	return multiq_find(sch, classid);
sch               324 net/sched/sch_multiq.c static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
sch               327 net/sched/sch_multiq.c 	struct multiq_sched_data *q = qdisc_priv(sch);
sch               334 net/sched/sch_multiq.c static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
sch               337 net/sched/sch_multiq.c 	struct multiq_sched_data *q = qdisc_priv(sch);
sch               341 net/sched/sch_multiq.c 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
sch               349 net/sched/sch_multiq.c static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
sch               351 net/sched/sch_multiq.c 	struct multiq_sched_data *q = qdisc_priv(sch);
sch               362 net/sched/sch_multiq.c 		if (arg->fn(sch, band + 1, arg) < 0) {
sch               370 net/sched/sch_multiq.c static struct tcf_block *multiq_tcf_block(struct Qdisc *sch, unsigned long cl,
sch               373 net/sched/sch_multiq.c 	struct multiq_sched_data *q = qdisc_priv(sch);
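
multiq_dequeue() advances q->curband round-robin and serves the first band that has work, additionally skipping bands whose hardware tx queue is stopped (the netdev_get_tx_queue() checks above). A minimal sketch of just the band-selection loop, with a hypothetical qlen[] array and the tx-queue check omitted:

#include <stdio.h>

#define BANDS 4

static int curband;

static int multiq_pick(const int qlen[BANDS])
{
    for (int i = 0; i < BANDS; i++) {
        curband = (curband + 1) % BANDS;    /* rotate to the next band */
        if (qlen[curband] > 0)
            return curband;
    }
    return -1;                              /* all bands empty */
}

int main(void)
{
    int qlen[BANDS] = { 0, 2, 0, 1 };

    printf("first pick: band %d\n", multiq_pick(qlen));  /* band 1 */
    printf("next pick:  band %d\n", multiq_pick(qlen));  /* band 3 */
    return 0;
}
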
sch               360 net/sched/sch_netem.c static void tfifo_reset(struct Qdisc *sch)
sch               362 net/sched/sch_netem.c 	struct netem_sched_data *q = qdisc_priv(sch);
sch               378 net/sched/sch_netem.c static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
sch               380 net/sched/sch_netem.c 	struct netem_sched_data *q = qdisc_priv(sch);
sch               405 net/sched/sch_netem.c 	sch->q.qlen++;
sch               412 net/sched/sch_netem.c static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
sch               421 net/sched/sch_netem.c 		qdisc_drop(skb, sch, to_free);
sch               434 net/sched/sch_netem.c static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch               437 net/sched/sch_netem.c 	struct netem_sched_data *q = qdisc_priv(sch);
sch               457 net/sched/sch_netem.c 			qdisc_qstats_drop(sch); /* mark packet */
sch               462 net/sched/sch_netem.c 		qdisc_qstats_drop(sch);
sch               479 net/sched/sch_netem.c 		struct Qdisc *rootq = qdisc_root_bh(sch);
sch               496 net/sched/sch_netem.c 			skb = netem_segment(skb, sch, to_free);
sch               506 net/sched/sch_netem.c 			qdisc_qstats_drop(sch);
sch               511 net/sched/sch_netem.c 			qdisc_drop(skb, sch, to_free);
sch               520 net/sched/sch_netem.c 	if (unlikely(sch->q.qlen >= sch->limit)) {
sch               523 net/sched/sch_netem.c 		qdisc_drop_all(skb, sch, to_free);
sch               527 net/sched/sch_netem.c 	qdisc_qstats_backlog_inc(sch, skb);
sch               544 net/sched/sch_netem.c 			if (sch->q.tail)
sch               545 net/sched/sch_netem.c 				last = netem_skb_cb(sch->q.tail);
sch               581 net/sched/sch_netem.c 		tfifo_enqueue(skb, sch);
sch               590 net/sched/sch_netem.c 		__qdisc_enqueue_head(skb, &sch->q);
sch               591 net/sched/sch_netem.c 		sch->qstats.requeues++;
sch               607 net/sched/sch_netem.c 			rc = qdisc_enqueue(segs, sch, to_free);
sch               610 net/sched/sch_netem.c 					qdisc_qstats_drop(sch);
sch               618 net/sched/sch_netem.c 		qdisc_tree_reduce_backlog(sch, -(nb - 1), -(len - prev_len));
sch               676 net/sched/sch_netem.c static struct sk_buff *netem_dequeue(struct Qdisc *sch)
sch               678 net/sched/sch_netem.c 	struct netem_sched_data *q = qdisc_priv(sch);
sch               682 net/sched/sch_netem.c 	skb = __qdisc_dequeue_head(&sch->q);
sch               684 net/sched/sch_netem.c 		qdisc_qstats_backlog_dec(sch, skb);
sch               686 net/sched/sch_netem.c 		qdisc_bstats_update(sch, skb);
sch               701 net/sched/sch_netem.c 			sch->q.qlen--;
sch               702 net/sched/sch_netem.c 			qdisc_qstats_backlog_dec(sch, skb);
sch               708 net/sched/sch_netem.c 			skb->dev = qdisc_dev(sch);
sch               727 net/sched/sch_netem.c 					qdisc_qstats_drop(sch);
sch               728 net/sched/sch_netem.c 					qdisc_tree_reduce_backlog(sch, 1,
sch               755 net/sched/sch_netem.c static void netem_reset(struct Qdisc *sch)
sch               757 net/sched/sch_netem.c 	struct netem_sched_data *q = qdisc_priv(sch);
sch               759 net/sched/sch_netem.c 	qdisc_reset_queue(sch);
sch               760 net/sched/sch_netem.c 	tfifo_reset(sch);
sch               776 net/sched/sch_netem.c static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
sch               796 net/sched/sch_netem.c 	root_lock = qdisc_root_sleeping_lock(sch);
sch               950 net/sched/sch_netem.c static int netem_change(struct Qdisc *sch, struct nlattr *opt,
sch               953 net/sched/sch_netem.c 	struct netem_sched_data *q = qdisc_priv(sch);
sch               983 net/sched/sch_netem.c 		ret = get_dist_table(sch, &q->delay_dist,
sch               990 net/sched/sch_netem.c 		ret = get_dist_table(sch, &q->slot_dist,
sch               996 net/sched/sch_netem.c 	sch->limit = qopt->limit;
sch              1052 net/sched/sch_netem.c static int netem_init(struct Qdisc *sch, struct nlattr *opt,
sch              1055 net/sched/sch_netem.c 	struct netem_sched_data *q = qdisc_priv(sch);
sch              1058 net/sched/sch_netem.c 	qdisc_watchdog_init(&q->watchdog, sch);
sch              1064 net/sched/sch_netem.c 	ret = netem_change(sch, opt, extack);
sch              1070 net/sched/sch_netem.c static void netem_destroy(struct Qdisc *sch)
sch              1072 net/sched/sch_netem.c 	struct netem_sched_data *q = qdisc_priv(sch);
sch              1131 net/sched/sch_netem.c static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
sch              1133 net/sched/sch_netem.c 	const struct netem_sched_data *q = qdisc_priv(sch);
sch              1213 net/sched/sch_netem.c static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
sch              1216 net/sched/sch_netem.c 	struct netem_sched_data *q = qdisc_priv(sch);
sch              1227 net/sched/sch_netem.c static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
sch              1230 net/sched/sch_netem.c 	struct netem_sched_data *q = qdisc_priv(sch);
sch              1232 net/sched/sch_netem.c 	*old = qdisc_replace(sch, new, &q->qdisc);
sch              1236 net/sched/sch_netem.c static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
sch              1238 net/sched/sch_netem.c 	struct netem_sched_data *q = qdisc_priv(sch);
sch              1242 net/sched/sch_netem.c static unsigned long netem_find(struct Qdisc *sch, u32 classid)
sch              1247 net/sched/sch_netem.c static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
sch              1251 net/sched/sch_netem.c 			if (walker->fn(sch, 1, walker) < 0) {
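
netem draws per-packet delay jitter from a distribution table loaded through get_dist_table() above. A rough user-space analogue of that lookup; the table contents, the tabledist() name and the scale constant here are illustrative stand-ins, not the kernel's own values:

#include <stdio.h>
#include <stdlib.h>

#define DIST_SCALE 8192     /* illustrative fixed-point scale */

static long tabledist(long mu, long sigma, const short *dist, int n)
{
    if (sigma == 0 || n == 0)
        return mu;
    /* one random table entry perturbs the mean by up to +/- sigma */
    return mu + (long)sigma * dist[rand() % n] / DIST_SCALE;
}

int main(void)
{
    /* toy "normal-ish" table; real tables come from userspace via netlink */
    static const short dist[] = { -8192, -4096, 0, 0, 4096, 8192 };

    for (int i = 0; i < 5; i++)
        printf("delay = %ld us\n",
               tabledist(100000, 10000, dist, 6));  /* 100ms +/- 10ms */
    return 0;
}
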
sch                68 net/sched/sch_pie.c 	struct Qdisc *sch;
sch                92 net/sched/sch_pie.c static bool drop_early(struct Qdisc *sch, u32 packet_size)
sch                94 net/sched/sch_pie.c 	struct pie_sched_data *q = qdisc_priv(sch);
sch                97 net/sched/sch_pie.c 	u32 mtu = psched_mtu(qdisc_dev(sch));
sch               113 net/sched/sch_pie.c 	if (sch->qstats.backlog < 2 * mtu)
sch               151 net/sched/sch_pie.c static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch               154 net/sched/sch_pie.c 	struct pie_sched_data *q = qdisc_priv(sch);
sch               157 net/sched/sch_pie.c 	if (unlikely(qdisc_qlen(sch) >= sch->limit)) {
sch               162 net/sched/sch_pie.c 	if (!drop_early(sch, skb->len)) {
sch               176 net/sched/sch_pie.c 		if (qdisc_qlen(sch) > q->stats.maxq)
sch               177 net/sched/sch_pie.c 			q->stats.maxq = qdisc_qlen(sch);
sch               179 net/sched/sch_pie.c 		return qdisc_enqueue_tail(skb, sch);
sch               186 net/sched/sch_pie.c 	return qdisc_drop(skb, sch, to_free);
sch               199 net/sched/sch_pie.c static int pie_change(struct Qdisc *sch, struct nlattr *opt,
sch               202 net/sched/sch_pie.c 	struct pie_sched_data *q = qdisc_priv(sch);
sch               215 net/sched/sch_pie.c 	sch_tree_lock(sch);
sch               235 net/sched/sch_pie.c 		sch->limit = limit;
sch               251 net/sched/sch_pie.c 	qlen = sch->q.qlen;
sch               252 net/sched/sch_pie.c 	while (sch->q.qlen > sch->limit) {
sch               253 net/sched/sch_pie.c 		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
sch               256 net/sched/sch_pie.c 		qdisc_qstats_backlog_dec(sch, skb);
sch               257 net/sched/sch_pie.c 		rtnl_qdisc_drop(skb, sch);
sch               259 net/sched/sch_pie.c 	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
sch               261 net/sched/sch_pie.c 	sch_tree_unlock(sch);
sch               265 net/sched/sch_pie.c static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb)
sch               267 net/sched/sch_pie.c 	struct pie_sched_data *q = qdisc_priv(sch);
sch               268 net/sched/sch_pie.c 	int qlen = sch->qstats.backlog;	/* current queue size in bytes */
sch               330 net/sched/sch_pie.c static void calculate_probability(struct Qdisc *sch)
sch               332 net/sched/sch_pie.c 	struct pie_sched_data *q = qdisc_priv(sch);
sch               333 net/sched/sch_pie.c 	u32 qlen = sch->qstats.backlog;	/* queue size in bytes */
sch               446 net/sched/sch_pie.c 	struct Qdisc *sch = q->sch;
sch               447 net/sched/sch_pie.c 	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
sch               450 net/sched/sch_pie.c 	calculate_probability(sch);
sch               458 net/sched/sch_pie.c static int pie_init(struct Qdisc *sch, struct nlattr *opt,
sch               461 net/sched/sch_pie.c 	struct pie_sched_data *q = qdisc_priv(sch);
sch               465 net/sched/sch_pie.c 	sch->limit = q->params.limit;
sch               467 net/sched/sch_pie.c 	q->sch = sch;
sch               471 net/sched/sch_pie.c 		int err = pie_change(sch, opt, extack);
sch               481 net/sched/sch_pie.c static int pie_dump(struct Qdisc *sch, struct sk_buff *skb)
sch               483 net/sched/sch_pie.c 	struct pie_sched_data *q = qdisc_priv(sch);
sch               494 net/sched/sch_pie.c 	    nla_put_u32(skb, TCA_PIE_LIMIT, sch->limit) ||
sch               510 net/sched/sch_pie.c static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
sch               512 net/sched/sch_pie.c 	struct pie_sched_data *q = qdisc_priv(sch);
sch               530 net/sched/sch_pie.c static struct sk_buff *pie_qdisc_dequeue(struct Qdisc *sch)
sch               532 net/sched/sch_pie.c 	struct sk_buff *skb = qdisc_dequeue_head(sch);
sch               537 net/sched/sch_pie.c 	pie_process_dequeue(sch, skb);
sch               541 net/sched/sch_pie.c static void pie_reset(struct Qdisc *sch)
sch               543 net/sched/sch_pie.c 	struct pie_sched_data *q = qdisc_priv(sch);
sch               545 net/sched/sch_pie.c 	qdisc_reset_queue(sch);
sch               549 net/sched/sch_pie.c static void pie_destroy(struct Qdisc *sch)
sch               551 net/sched/sch_pie.c 	struct pie_sched_data *q = qdisc_priv(sch);
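
calculate_probability() above adjusts PIE's drop probability from both the current queue-delay error and its trend since the last update, per RFC 8033. A floating-point model of that controller (the kernel works in fixed point; the alpha and beta values here are illustrative):

#include <stdio.h>

static double pie_update(double prob, double qdelay, double qdelay_old,
                         double target, double alpha, double beta)
{
    /* proportional term on the error, plus a term on the delay trend */
    prob += alpha * (qdelay - target) + beta * (qdelay - qdelay_old);
    if (prob < 0.0) prob = 0.0;
    if (prob > 1.0) prob = 1.0;
    return prob;
}

int main(void)
{
    double p = 0.0, q_old = 0.0, target = 0.015;   /* 15 ms target */
    double samples[] = { 0.020, 0.030, 0.025, 0.010 };

    for (int i = 0; i < 4; i++) {
        p = pie_update(p, samples[i], q_old, target, 0.125, 1.25);
        q_old = samples[i];
        printf("qdelay=%.3f -> p=%.4f\n", samples[i], p);
    }
    return 0;
}
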
sch                87 net/sched/sch_plug.c static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch                90 net/sched/sch_plug.c 	struct plug_sched_data *q = qdisc_priv(sch);
sch                92 net/sched/sch_plug.c 	if (likely(sch->qstats.backlog + skb->len <= q->limit)) {
sch                95 net/sched/sch_plug.c 		return qdisc_enqueue_tail(skb, sch);
sch                98 net/sched/sch_plug.c 	return qdisc_drop(skb, sch, to_free);
sch               101 net/sched/sch_plug.c static struct sk_buff *plug_dequeue(struct Qdisc *sch)
sch               103 net/sched/sch_plug.c 	struct plug_sched_data *q = qdisc_priv(sch);
sch               119 net/sched/sch_plug.c 	return qdisc_dequeue_head(sch);
sch               122 net/sched/sch_plug.c static int plug_init(struct Qdisc *sch, struct nlattr *opt,
sch               125 net/sched/sch_plug.c 	struct plug_sched_data *q = qdisc_priv(sch);
sch               133 net/sched/sch_plug.c 		q->limit = qdisc_dev(sch)->tx_queue_len
sch               134 net/sched/sch_plug.c 		           * psched_mtu(qdisc_dev(sch));
sch               158 net/sched/sch_plug.c static int plug_change(struct Qdisc *sch, struct nlattr *opt,
sch               161 net/sched/sch_plug.c 	struct plug_sched_data *q = qdisc_priv(sch);
sch               187 net/sched/sch_plug.c 		netif_schedule_queue(sch->dev_queue);
sch               195 net/sched/sch_plug.c 		netif_schedule_queue(sch->dev_queue);
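
sch_plug buffers traffic while "plugged" and releases the packets queued before an unplug/release message (plug_change() above calls netif_schedule_queue() when the gate opens). A toy model of the gate with the buffer reduced to counters; the struct and function names are hypothetical:

#include <stdio.h>
#include <stdbool.h>

struct plug { int queued; int releasable; bool unplugged; };

static void plug_enqueue(struct plug *p) { p->queued++; }
static void plug_release(struct plug *p) { p->releasable = p->queued; }

static bool plug_dequeue(struct plug *p)
{
    if (!p->unplugged && p->releasable == 0)
        return false;               /* gate closed: hold packets back */
    if (p->queued == 0)
        return false;
    p->queued--;
    if (!p->unplugged)
        p->releasable--;            /* only pre-release packets may pass */
    return true;
}

int main(void)
{
    struct plug p = {0};

    plug_enqueue(&p); plug_enqueue(&p);
    printf("before release: %d\n", plug_dequeue(&p));   /* 0: held */
    plug_release(&p);
    printf("after release:  %d\n", plug_dequeue(&p));   /* 1: flows */
    return 0;
}
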
sch                31 net/sched/sch_prio.c prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
sch                33 net/sched/sch_prio.c 	struct prio_sched_data *q = qdisc_priv(sch);
sch                40 net/sched/sch_prio.c 	if (TC_H_MAJ(skb->priority) != sch->handle) {
sch                69 net/sched/sch_prio.c prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
sch                75 net/sched/sch_prio.c 	qdisc = prio_classify(skb, sch, &ret);
sch                80 net/sched/sch_prio.c 			qdisc_qstats_drop(sch);
sch                88 net/sched/sch_prio.c 		sch->qstats.backlog += len;
sch                89 net/sched/sch_prio.c 		sch->q.qlen++;
sch                93 net/sched/sch_prio.c 		qdisc_qstats_drop(sch);
sch                97 net/sched/sch_prio.c static struct sk_buff *prio_peek(struct Qdisc *sch)
sch                99 net/sched/sch_prio.c 	struct prio_sched_data *q = qdisc_priv(sch);
sch               111 net/sched/sch_prio.c static struct sk_buff *prio_dequeue(struct Qdisc *sch)
sch               113 net/sched/sch_prio.c 	struct prio_sched_data *q = qdisc_priv(sch);
sch               120 net/sched/sch_prio.c 			qdisc_bstats_update(sch, skb);
sch               121 net/sched/sch_prio.c 			qdisc_qstats_backlog_dec(sch, skb);
sch               122 net/sched/sch_prio.c 			sch->q.qlen--;
sch               131 net/sched/sch_prio.c prio_reset(struct Qdisc *sch)
sch               134 net/sched/sch_prio.c 	struct prio_sched_data *q = qdisc_priv(sch);
sch               138 net/sched/sch_prio.c 	sch->qstats.backlog = 0;
sch               139 net/sched/sch_prio.c 	sch->q.qlen = 0;
sch               142 net/sched/sch_prio.c static int prio_offload(struct Qdisc *sch, struct tc_prio_qopt *qopt)
sch               144 net/sched/sch_prio.c 	struct net_device *dev = qdisc_dev(sch);
sch               146 net/sched/sch_prio.c 		.handle = sch->handle,
sch               147 net/sched/sch_prio.c 		.parent = sch->parent,
sch               158 net/sched/sch_prio.c 		opt.replace_params.qstats = &sch->qstats;
sch               167 net/sched/sch_prio.c prio_destroy(struct Qdisc *sch)
sch               170 net/sched/sch_prio.c 	struct prio_sched_data *q = qdisc_priv(sch);
sch               173 net/sched/sch_prio.c 	prio_offload(sch, NULL);
sch               178 net/sched/sch_prio.c static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
sch               181 net/sched/sch_prio.c 	struct prio_sched_data *q = qdisc_priv(sch);
sch               200 net/sched/sch_prio.c 		queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
sch               201 net/sched/sch_prio.c 					      TC_H_MAKE(sch->handle, i + 1),
sch               210 net/sched/sch_prio.c 	prio_offload(sch, qopt);
sch               211 net/sched/sch_prio.c 	sch_tree_lock(sch);
sch               224 net/sched/sch_prio.c 	sch_tree_unlock(sch);
sch               231 net/sched/sch_prio.c static int prio_init(struct Qdisc *sch, struct nlattr *opt,
sch               234 net/sched/sch_prio.c 	struct prio_sched_data *q = qdisc_priv(sch);
sch               240 net/sched/sch_prio.c 	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
sch               244 net/sched/sch_prio.c 	return prio_tune(sch, opt, extack);
sch               247 net/sched/sch_prio.c static int prio_dump_offload(struct Qdisc *sch)
sch               251 net/sched/sch_prio.c 		.handle = sch->handle,
sch               252 net/sched/sch_prio.c 		.parent = sch->parent,
sch               255 net/sched/sch_prio.c 				.bstats = &sch->bstats,
sch               256 net/sched/sch_prio.c 				.qstats = &sch->qstats,
sch               261 net/sched/sch_prio.c 	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_PRIO, &hw_stats);
sch               264 net/sched/sch_prio.c static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
sch               266 net/sched/sch_prio.c 	struct prio_sched_data *q = qdisc_priv(sch);
sch               274 net/sched/sch_prio.c 	err = prio_dump_offload(sch);
sch               288 net/sched/sch_prio.c static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
sch               291 net/sched/sch_prio.c 	struct prio_sched_data *q = qdisc_priv(sch);
sch               296 net/sched/sch_prio.c 		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
sch               297 net/sched/sch_prio.c 					TC_H_MAKE(sch->handle, arg), extack);
sch               304 net/sched/sch_prio.c 	*old = qdisc_replace(sch, new, &q->queues[band]);
sch               306 net/sched/sch_prio.c 	graft_offload.handle = sch->handle;
sch               307 net/sched/sch_prio.c 	graft_offload.parent = sch->parent;
sch               312 net/sched/sch_prio.c 	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, *old,
sch               319 net/sched/sch_prio.c prio_leaf(struct Qdisc *sch, unsigned long arg)
sch               321 net/sched/sch_prio.c 	struct prio_sched_data *q = qdisc_priv(sch);
sch               327 net/sched/sch_prio.c static unsigned long prio_find(struct Qdisc *sch, u32 classid)
sch               329 net/sched/sch_prio.c 	struct prio_sched_data *q = qdisc_priv(sch);
sch               337 net/sched/sch_prio.c static unsigned long prio_bind(struct Qdisc *sch, unsigned long parent, u32 classid)
sch               339 net/sched/sch_prio.c 	return prio_find(sch, classid);
sch               347 net/sched/sch_prio.c static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb,
sch               350 net/sched/sch_prio.c 	struct prio_sched_data *q = qdisc_priv(sch);
sch               357 net/sched/sch_prio.c static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
sch               360 net/sched/sch_prio.c 	struct prio_sched_data *q = qdisc_priv(sch);
sch               364 net/sched/sch_prio.c 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
sch               372 net/sched/sch_prio.c static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
sch               374 net/sched/sch_prio.c 	struct prio_sched_data *q = qdisc_priv(sch);
sch               385 net/sched/sch_prio.c 		if (arg->fn(sch, prio + 1, arg) < 0) {
sch               393 net/sched/sch_prio.c static struct tcf_block *prio_tcf_block(struct Qdisc *sch, unsigned long cl,
sch               396 net/sched/sch_prio.c 	struct prio_sched_data *q = qdisc_priv(sch);
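
prio_dequeue() above walks q->queues[] in band order and returns the first packet it finds, so a busy band 0 always starves the bands below it. The scan in isolation, over a hypothetical per-band length array:

#include <stdio.h>

#define BANDS 3

static int prio_pick(const int qlen[BANDS])
{
    for (int band = 0; band < BANDS; band++)
        if (qlen[band] > 0)
            return band;            /* strict priority: lowest band wins */
    return -1;
}

int main(void)
{
    int qlen[BANDS] = { 0, 5, 9 };

    printf("serving band %d\n", prio_pick(qlen));   /* band 1 beats band 2 */
    return 0;
}
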
sch               206 net/sched/sch_qfq.c static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
sch               208 net/sched/sch_qfq.c 	struct qfq_sched *q = qdisc_priv(sch);
sch               374 net/sched/sch_qfq.c static int qfq_change_agg(struct Qdisc *sch, struct qfq_class *cl, u32 weight,
sch               377 net/sched/sch_qfq.c 	struct qfq_sched *q = qdisc_priv(sch);
sch               392 net/sched/sch_qfq.c static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
sch               396 net/sched/sch_qfq.c 	struct qfq_sched *q = qdisc_priv(sch);
sch               431 net/sched/sch_qfq.c 		lmax = psched_mtu(qdisc_dev(sch));
sch               454 net/sched/sch_qfq.c 						    qdisc_root_sleeping_running(sch),
sch               471 net/sched/sch_qfq.c 	cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
sch               480 net/sched/sch_qfq.c 					qdisc_root_sleeping_running(sch),
sch               488 net/sched/sch_qfq.c 	sch_tree_lock(sch);
sch               490 net/sched/sch_qfq.c 	sch_tree_unlock(sch);
sch               492 net/sched/sch_qfq.c 	qdisc_class_hash_grow(sch, &q->clhash);
sch               495 net/sched/sch_qfq.c 	sch_tree_lock(sch);
sch               498 net/sched/sch_qfq.c 		sch_tree_unlock(sch);
sch               505 net/sched/sch_qfq.c 		sch_tree_lock(sch);
sch               511 net/sched/sch_qfq.c 	sch_tree_unlock(sch);
sch               522 net/sched/sch_qfq.c static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
sch               524 net/sched/sch_qfq.c 	struct qfq_sched *q = qdisc_priv(sch);
sch               532 net/sched/sch_qfq.c static int qfq_delete_class(struct Qdisc *sch, unsigned long arg)
sch               534 net/sched/sch_qfq.c 	struct qfq_sched *q = qdisc_priv(sch);
sch               540 net/sched/sch_qfq.c 	sch_tree_lock(sch);
sch               545 net/sched/sch_qfq.c 	sch_tree_unlock(sch);
sch               547 net/sched/sch_qfq.c 	qfq_destroy_class(sch, cl);
sch               551 net/sched/sch_qfq.c static unsigned long qfq_search_class(struct Qdisc *sch, u32 classid)
sch               553 net/sched/sch_qfq.c 	return (unsigned long)qfq_find_class(sch, classid);
sch               556 net/sched/sch_qfq.c static struct tcf_block *qfq_tcf_block(struct Qdisc *sch, unsigned long cl,
sch               559 net/sched/sch_qfq.c 	struct qfq_sched *q = qdisc_priv(sch);
sch               567 net/sched/sch_qfq.c static unsigned long qfq_bind_tcf(struct Qdisc *sch, unsigned long parent,
sch               570 net/sched/sch_qfq.c 	struct qfq_class *cl = qfq_find_class(sch, classid);
sch               578 net/sched/sch_qfq.c static void qfq_unbind_tcf(struct Qdisc *sch, unsigned long arg)
sch               585 net/sched/sch_qfq.c static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
sch               592 net/sched/sch_qfq.c 		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
sch               598 net/sched/sch_qfq.c 	*old = qdisc_replace(sch, new, &cl->qdisc);
sch               602 net/sched/sch_qfq.c static struct Qdisc *qfq_class_leaf(struct Qdisc *sch, unsigned long arg)
sch               609 net/sched/sch_qfq.c static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
sch               632 net/sched/sch_qfq.c static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
sch               643 net/sched/sch_qfq.c 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
sch               652 net/sched/sch_qfq.c static void qfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
sch               654 net/sched/sch_qfq.c 	struct qfq_sched *q = qdisc_priv(sch);
sch               667 net/sched/sch_qfq.c 			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
sch               676 net/sched/sch_qfq.c static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch,
sch               679 net/sched/sch_qfq.c 	struct qfq_sched *q = qdisc_priv(sch);
sch               685 net/sched/sch_qfq.c 	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
sch               687 net/sched/sch_qfq.c 		cl = qfq_find_class(sch, skb->priority);
sch               709 net/sched/sch_qfq.c 			cl = qfq_find_class(sch, res.classid);
sch              1077 net/sched/sch_qfq.c static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
sch              1079 net/sched/sch_qfq.c 	struct qfq_sched *q = qdisc_priv(sch);
sch              1117 net/sched/sch_qfq.c 		} else if (sch->q.qlen == 0) { /* no aggregate to serve */
sch              1132 net/sched/sch_qfq.c 	qdisc_qstats_backlog_dec(sch, skb);
sch              1133 net/sched/sch_qfq.c 	sch->q.qlen--;
sch              1134 net/sched/sch_qfq.c 	qdisc_bstats_update(sch, skb);
sch              1197 net/sched/sch_qfq.c static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch              1201 net/sched/sch_qfq.c 	struct qfq_sched *q = qdisc_priv(sch);
sch              1207 net/sched/sch_qfq.c 	cl = qfq_classify(skb, sch, &err);
sch              1210 net/sched/sch_qfq.c 			qdisc_qstats_drop(sch);
sch              1219 net/sched/sch_qfq.c 		err = qfq_change_agg(sch, cl, cl->agg->class_weight, len);
sch              1222 net/sched/sch_qfq.c 			return qdisc_drop(skb, sch, to_free);
sch              1233 net/sched/sch_qfq.c 			qdisc_qstats_drop(sch);
sch              1240 net/sched/sch_qfq.c 	sch->qstats.backlog += len;
sch              1241 net/sched/sch_qfq.c 	++sch->q.qlen;
sch              1402 net/sched/sch_qfq.c static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
sch              1404 net/sched/sch_qfq.c 	struct qfq_sched *q = qdisc_priv(sch);
sch              1410 net/sched/sch_qfq.c static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
sch              1413 net/sched/sch_qfq.c 	struct qfq_sched *q = qdisc_priv(sch);
sch              1418 net/sched/sch_qfq.c 	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
sch              1426 net/sched/sch_qfq.c 	if (qdisc_dev(sch)->tx_queue_len + 1 > QFQ_MAX_AGG_CLASSES)
sch              1429 net/sched/sch_qfq.c 		max_classes = qdisc_dev(sch)->tx_queue_len + 1;
sch              1451 net/sched/sch_qfq.c static void qfq_reset_qdisc(struct Qdisc *sch)
sch              1453 net/sched/sch_qfq.c 	struct qfq_sched *q = qdisc_priv(sch);
sch              1465 net/sched/sch_qfq.c 	sch->qstats.backlog = 0;
sch              1466 net/sched/sch_qfq.c 	sch->q.qlen = 0;
sch              1469 net/sched/sch_qfq.c static void qfq_destroy_qdisc(struct Qdisc *sch)
sch              1471 net/sched/sch_qfq.c 	struct qfq_sched *q = qdisc_priv(sch);
sch              1481 net/sched/sch_qfq.c 			qfq_destroy_class(sch, cl);
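
QFQ+ groups classes by weight and maximum packet size (the weight/lmax handling in qfq_change_class() above) to approximate weighted fair queueing in O(1). The quantity being approximated is the classic virtual finish time F = S + len/weight; a toy computation of that, not QFQ's grouped fixed-point machinery:

#include <stdio.h>

struct flow { double vstart, weight; };

static double finish_time(struct flow *f, double pkt_len)
{
    double F = f->vstart + pkt_len / f->weight;

    f->vstart = F;          /* next packet starts where this one ends */
    return F;
}

int main(void)
{
    struct flow heavy = { 0.0, 4.0 }, light = { 0.0, 1.0 };

    /* equal-sized packets: the weight-4 flow finishes 4x sooner */
    printf("heavy F=%.0f  light F=%.0f\n",
           finish_time(&heavy, 1500), finish_time(&light, 1500));
    return 0;
}
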
sch                40 net/sched/sch_red.c 	struct Qdisc		*sch;
sch                57 net/sched/sch_red.c static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch                60 net/sched/sch_red.c 	struct red_sched_data *q = qdisc_priv(sch);
sch                76 net/sched/sch_red.c 		qdisc_qstats_overlimit(sch);
sch                86 net/sched/sch_red.c 		qdisc_qstats_overlimit(sch);
sch                99 net/sched/sch_red.c 		qdisc_qstats_backlog_inc(sch, skb);
sch               100 net/sched/sch_red.c 		sch->q.qlen++;
sch               103 net/sched/sch_red.c 		qdisc_qstats_drop(sch);
sch               108 net/sched/sch_red.c 	qdisc_drop(skb, sch, to_free);
sch               112 net/sched/sch_red.c static struct sk_buff *red_dequeue(struct Qdisc *sch)
sch               115 net/sched/sch_red.c 	struct red_sched_data *q = qdisc_priv(sch);
sch               120 net/sched/sch_red.c 		qdisc_bstats_update(sch, skb);
sch               121 net/sched/sch_red.c 		qdisc_qstats_backlog_dec(sch, skb);
sch               122 net/sched/sch_red.c 		sch->q.qlen--;
sch               130 net/sched/sch_red.c static struct sk_buff *red_peek(struct Qdisc *sch)
sch               132 net/sched/sch_red.c 	struct red_sched_data *q = qdisc_priv(sch);
sch               138 net/sched/sch_red.c static void red_reset(struct Qdisc *sch)
sch               140 net/sched/sch_red.c 	struct red_sched_data *q = qdisc_priv(sch);
sch               143 net/sched/sch_red.c 	sch->qstats.backlog = 0;
sch               144 net/sched/sch_red.c 	sch->q.qlen = 0;
sch               148 net/sched/sch_red.c static int red_offload(struct Qdisc *sch, bool enable)
sch               150 net/sched/sch_red.c 	struct red_sched_data *q = qdisc_priv(sch);
sch               151 net/sched/sch_red.c 	struct net_device *dev = qdisc_dev(sch);
sch               153 net/sched/sch_red.c 		.handle = sch->handle,
sch               154 net/sched/sch_red.c 		.parent = sch->parent,
sch               168 net/sched/sch_red.c 		opt.set.qstats = &sch->qstats;
sch               176 net/sched/sch_red.c static void red_destroy(struct Qdisc *sch)
sch               178 net/sched/sch_red.c 	struct red_sched_data *q = qdisc_priv(sch);
sch               181 net/sched/sch_red.c 	red_offload(sch, false);
sch               191 net/sched/sch_red.c static int red_change(struct Qdisc *sch, struct nlattr *opt,
sch               195 net/sched/sch_red.c 	struct red_sched_data *q = qdisc_priv(sch);
sch               220 net/sched/sch_red.c 		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit,
sch               229 net/sched/sch_red.c 	sch_tree_lock(sch);
sch               252 net/sched/sch_red.c 	sch_tree_unlock(sch);
sch               254 net/sched/sch_red.c 	red_offload(sch, true);
sch               264 net/sched/sch_red.c 	struct Qdisc *sch = q->sch;
sch               265 net/sched/sch_red.c 	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
sch               273 net/sched/sch_red.c static int red_init(struct Qdisc *sch, struct nlattr *opt,
sch               276 net/sched/sch_red.c 	struct red_sched_data *q = qdisc_priv(sch);
sch               279 net/sched/sch_red.c 	q->sch = sch;
sch               281 net/sched/sch_red.c 	return red_change(sch, opt, extack);
sch               284 net/sched/sch_red.c static int red_dump_offload_stats(struct Qdisc *sch)
sch               288 net/sched/sch_red.c 		.handle = sch->handle,
sch               289 net/sched/sch_red.c 		.parent = sch->parent,
sch               291 net/sched/sch_red.c 			.stats.bstats = &sch->bstats,
sch               292 net/sched/sch_red.c 			.stats.qstats = &sch->qstats,
sch               296 net/sched/sch_red.c 	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_RED, &hw_stats);
sch               299 net/sched/sch_red.c static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
sch               301 net/sched/sch_red.c 	struct red_sched_data *q = qdisc_priv(sch);
sch               314 net/sched/sch_red.c 	err = red_dump_offload_stats(sch);
sch               331 net/sched/sch_red.c static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
sch               333 net/sched/sch_red.c 	struct red_sched_data *q = qdisc_priv(sch);
sch               334 net/sched/sch_red.c 	struct net_device *dev = qdisc_dev(sch);
sch               337 net/sched/sch_red.c 	if (sch->flags & TCQ_F_OFFLOADED) {
sch               340 net/sched/sch_red.c 			.handle = sch->handle,
sch               341 net/sched/sch_red.c 			.parent = sch->parent,
sch               357 net/sched/sch_red.c static int red_dump_class(struct Qdisc *sch, unsigned long cl,
sch               360 net/sched/sch_red.c 	struct red_sched_data *q = qdisc_priv(sch);
sch               367 net/sched/sch_red.c static void red_graft_offload(struct Qdisc *sch,
sch               372 net/sched/sch_red.c 		.handle		= sch->handle,
sch               373 net/sched/sch_red.c 		.parent		= sch->parent,
sch               378 net/sched/sch_red.c 	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, old,
sch               382 net/sched/sch_red.c static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
sch               385 net/sched/sch_red.c 	struct red_sched_data *q = qdisc_priv(sch);
sch               390 net/sched/sch_red.c 	*old = qdisc_replace(sch, new, &q->qdisc);
sch               392 net/sched/sch_red.c 	red_graft_offload(sch, new, *old, extack);
sch               396 net/sched/sch_red.c static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
sch               398 net/sched/sch_red.c 	struct red_sched_data *q = qdisc_priv(sch);
sch               402 net/sched/sch_red.c static unsigned long red_find(struct Qdisc *sch, u32 classid)
sch               407 net/sched/sch_red.c static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
sch               411 net/sched/sch_red.c 			if (walker->fn(sch, 1, walker) < 0) {
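
red_enqueue() above consults the RED state machine: an EWMA of the queue length, and a drop probability that grows linearly between the min and max thresholds. A floating-point model of that math (the kernel keeps all of it in fixed point; the constants here are illustrative):

#include <stdio.h>

static double red_qavg(double qavg, double qlen, double w)
{
    return qavg + w * (qlen - qavg);        /* exponential moving average */
}

static double red_drop_prob(double qavg, double qmin, double qmax,
                            double max_p)
{
    if (qavg < qmin)  return 0.0;
    if (qavg >= qmax) return 1.0;           /* hard drop region */
    return max_p * (qavg - qmin) / (qmax - qmin);
}

int main(void)
{
    double qavg = 0.0;

    for (int qlen = 0; qlen <= 40; qlen += 10) {
        qavg = red_qavg(qavg, qlen, 0.25);
        printf("qlen=%2d qavg=%5.1f p=%.3f\n",
               qlen, qavg, red_drop_prob(qavg, 5, 30, 0.02));
    }
    return 0;
}
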
sch               279 net/sched/sch_sfb.c static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch               283 net/sched/sch_sfb.c 	struct sfb_sched_data *q = qdisc_priv(sch);
sch               293 net/sched/sch_sfb.c 	if (unlikely(sch->q.qlen >= q->limit)) {
sch               294 net/sched/sch_sfb.c 		qdisc_qstats_overlimit(sch);
sch               347 net/sched/sch_sfb.c 		qdisc_qstats_overlimit(sch);
sch               373 net/sched/sch_sfb.c 			qdisc_qstats_overlimit(sch);
sch               404 net/sched/sch_sfb.c 		qdisc_qstats_backlog_inc(sch, skb);
sch               405 net/sched/sch_sfb.c 		sch->q.qlen++;
sch               409 net/sched/sch_sfb.c 		qdisc_qstats_drop(sch);
sch               414 net/sched/sch_sfb.c 	qdisc_drop(skb, sch, to_free);
sch               418 net/sched/sch_sfb.c 		qdisc_qstats_drop(sch);
sch               423 net/sched/sch_sfb.c static struct sk_buff *sfb_dequeue(struct Qdisc *sch)
sch               425 net/sched/sch_sfb.c 	struct sfb_sched_data *q = qdisc_priv(sch);
sch               432 net/sched/sch_sfb.c 		qdisc_bstats_update(sch, skb);
sch               433 net/sched/sch_sfb.c 		qdisc_qstats_backlog_dec(sch, skb);
sch               434 net/sched/sch_sfb.c 		sch->q.qlen--;
sch               441 net/sched/sch_sfb.c static struct sk_buff *sfb_peek(struct Qdisc *sch)
sch               443 net/sched/sch_sfb.c 	struct sfb_sched_data *q = qdisc_priv(sch);
sch               451 net/sched/sch_sfb.c static void sfb_reset(struct Qdisc *sch)
sch               453 net/sched/sch_sfb.c 	struct sfb_sched_data *q = qdisc_priv(sch);
sch               456 net/sched/sch_sfb.c 	sch->qstats.backlog = 0;
sch               457 net/sched/sch_sfb.c 	sch->q.qlen = 0;
sch               464 net/sched/sch_sfb.c static void sfb_destroy(struct Qdisc *sch)
sch               466 net/sched/sch_sfb.c 	struct sfb_sched_data *q = qdisc_priv(sch);
sch               488 net/sched/sch_sfb.c static int sfb_change(struct Qdisc *sch, struct nlattr *opt,
sch               491 net/sched/sch_sfb.c 	struct sfb_sched_data *q = qdisc_priv(sch);
sch               512 net/sched/sch_sfb.c 		limit = qdisc_dev(sch)->tx_queue_len;
sch               514 net/sched/sch_sfb.c 	child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit, extack);
sch               520 net/sched/sch_sfb.c 	sch_tree_lock(sch);
sch               545 net/sched/sch_sfb.c 	sch_tree_unlock(sch);
sch               551 net/sched/sch_sfb.c static int sfb_init(struct Qdisc *sch, struct nlattr *opt,
sch               554 net/sched/sch_sfb.c 	struct sfb_sched_data *q = qdisc_priv(sch);
sch               557 net/sched/sch_sfb.c 	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
sch               562 net/sched/sch_sfb.c 	return sfb_change(sch, opt, extack);
sch               565 net/sched/sch_sfb.c static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
sch               567 net/sched/sch_sfb.c 	struct sfb_sched_data *q = qdisc_priv(sch);
sch               581 net/sched/sch_sfb.c 	sch->qstats.backlog = q->qdisc->qstats.backlog;
sch               594 net/sched/sch_sfb.c static int sfb_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
sch               596 net/sched/sch_sfb.c 	struct sfb_sched_data *q = qdisc_priv(sch);
sch               611 net/sched/sch_sfb.c static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
sch               617 net/sched/sch_sfb.c static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
sch               620 net/sched/sch_sfb.c 	struct sfb_sched_data *q = qdisc_priv(sch);
sch               625 net/sched/sch_sfb.c 	*old = qdisc_replace(sch, new, &q->qdisc);
sch               629 net/sched/sch_sfb.c static struct Qdisc *sfb_leaf(struct Qdisc *sch, unsigned long arg)
sch               631 net/sched/sch_sfb.c 	struct sfb_sched_data *q = qdisc_priv(sch);
sch               636 net/sched/sch_sfb.c static unsigned long sfb_find(struct Qdisc *sch, u32 classid)
sch               641 net/sched/sch_sfb.c static void sfb_unbind(struct Qdisc *sch, unsigned long arg)
sch               645 net/sched/sch_sfb.c static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
sch               652 net/sched/sch_sfb.c static int sfb_delete(struct Qdisc *sch, unsigned long cl)
sch               657 net/sched/sch_sfb.c static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
sch               661 net/sched/sch_sfb.c 			if (walker->fn(sch, 1, walker) < 0) {
sch               669 net/sched/sch_sfb.c static struct tcf_block *sfb_tcf_block(struct Qdisc *sch, unsigned long cl,
sch               672 net/sched/sch_sfb.c 	struct sfb_sched_data *q = qdisc_priv(sch);
sch               679 net/sched/sch_sfb.c static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent,
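
SFB hashes flows into bins, each running a Blue controller: the bin's marking probability is bumped when the bin is over its length limit and decayed when it drains (the qdisc_qstats_overlimit() calls above count those congestion events). A sketch of the per-bin update with illustrative step sizes:

#include <stdio.h>

static double blue_update(double p, int bin_qlen, int bin_limit)
{
    const double incr = 0.0005, decr = 0.00005;

    if (bin_qlen > bin_limit)
        p += incr;                  /* congestion: mark/drop more */
    else if (bin_qlen == 0)
        p -= decr;                  /* bin idle: back off */
    if (p < 0.0) p = 0.0;
    if (p > 1.0) p = 1.0;
    return p;
}

int main(void)
{
    double p = 0.0;

    p = blue_update(p, 12, 10);     /* over limit -> probability rises */
    p = blue_update(p, 0, 10);      /* idle -> probability decays */
    printf("p_mark = %.5f\n", p);
    return 0;
}
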
sch               144 net/sched/sch_sfq.c 	struct Qdisc	*sch;
sch               163 net/sched/sch_sfq.c static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
sch               166 net/sched/sch_sfq.c 	struct sfq_sched_data *q = qdisc_priv(sch);
sch               171 net/sched/sch_sfq.c 	if (TC_H_MAJ(skb->priority) == sch->handle &&
sch               293 net/sched/sch_sfq.c static unsigned int sfq_drop(struct Qdisc *sch, struct sk_buff **to_free)
sch               295 net/sched/sch_sfq.c 	struct sfq_sched_data *q = qdisc_priv(sch);
sch               310 net/sched/sch_sfq.c 		sch->q.qlen--;
sch               311 net/sched/sch_sfq.c 		qdisc_qstats_backlog_dec(sch, skb);
sch               312 net/sched/sch_sfq.c 		qdisc_drop(skb, sch, to_free);
sch               346 net/sched/sch_sfq.c sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
sch               348 net/sched/sch_sfq.c 	struct sfq_sched_data *q = qdisc_priv(sch);
sch               356 net/sched/sch_sfq.c 	hash = sfq_classify(skb, sch, &ret);
sch               359 net/sched/sch_sfq.c 			qdisc_qstats_drop(sch);
sch               370 net/sched/sch_sfq.c 			return qdisc_drop(skb, sch, to_free);
sch               389 net/sched/sch_sfq.c 			qdisc_qstats_overlimit(sch);
sch               406 net/sched/sch_sfq.c 			qdisc_qstats_overlimit(sch);
sch               427 net/sched/sch_sfq.c 			return qdisc_drop(skb, sch, to_free);
sch               432 net/sched/sch_sfq.c 		sch->qstats.backlog -= delta;
sch               434 net/sched/sch_sfq.c 		qdisc_drop(head, sch, to_free);
sch               437 net/sched/sch_sfq.c 		qdisc_tree_reduce_backlog(sch, 0, delta);
sch               442 net/sched/sch_sfq.c 	qdisc_qstats_backlog_inc(sch, skb);
sch               461 net/sched/sch_sfq.c 	if (++sch->q.qlen <= q->limit)
sch               465 net/sched/sch_sfq.c 	dropped = sfq_drop(sch, to_free);
sch               470 net/sched/sch_sfq.c 		qdisc_tree_reduce_backlog(sch, 0, dropped - qdisc_pkt_len(skb));
sch               475 net/sched/sch_sfq.c 	qdisc_tree_reduce_backlog(sch, 1, dropped);
sch               480 net/sched/sch_sfq.c sfq_dequeue(struct Qdisc *sch)
sch               482 net/sched/sch_sfq.c 	struct sfq_sched_data *q = qdisc_priv(sch);
sch               501 net/sched/sch_sfq.c 	qdisc_bstats_update(sch, skb);
sch               502 net/sched/sch_sfq.c 	sch->q.qlen--;
sch               503 net/sched/sch_sfq.c 	qdisc_qstats_backlog_dec(sch, skb);
sch               521 net/sched/sch_sfq.c sfq_reset(struct Qdisc *sch)
sch               525 net/sched/sch_sfq.c 	while ((skb = sfq_dequeue(sch)) != NULL)
sch               535 net/sched/sch_sfq.c static void sfq_rehash(struct Qdisc *sch)
sch               537 net/sched/sch_sfq.c 	struct sfq_sched_data *q = qdisc_priv(sch);
sch               571 net/sched/sch_sfq.c 				qdisc_qstats_backlog_dec(sch, skb);
sch               601 net/sched/sch_sfq.c 	sch->q.qlen -= dropped;
sch               602 net/sched/sch_sfq.c 	qdisc_tree_reduce_backlog(sch, dropped, drop_len);
sch               608 net/sched/sch_sfq.c 	struct Qdisc *sch = q->sch;
sch               609 net/sched/sch_sfq.c 	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
sch               616 net/sched/sch_sfq.c 		sfq_rehash(sch);
sch               623 net/sched/sch_sfq.c static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
sch               625 net/sched/sch_sfq.c 	struct sfq_sched_data *q = qdisc_priv(sch);
sch               657 net/sched/sch_sfq.c 	sch_tree_lock(sch);
sch               689 net/sched/sch_sfq.c 	qlen = sch->q.qlen;
sch               690 net/sched/sch_sfq.c 	while (sch->q.qlen > q->limit) {
sch               691 net/sched/sch_sfq.c 		dropped += sfq_drop(sch, &to_free);
sch               697 net/sched/sch_sfq.c 	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
sch               704 net/sched/sch_sfq.c 	sch_tree_unlock(sch);
sch               719 net/sched/sch_sfq.c static void sfq_destroy(struct Qdisc *sch)
sch               721 net/sched/sch_sfq.c 	struct sfq_sched_data *q = qdisc_priv(sch);
sch               731 net/sched/sch_sfq.c static int sfq_init(struct Qdisc *sch, struct nlattr *opt,
sch               734 net/sched/sch_sfq.c 	struct sfq_sched_data *q = qdisc_priv(sch);
sch               738 net/sched/sch_sfq.c 	q->sch = sch;
sch               741 net/sched/sch_sfq.c 	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
sch               756 net/sched/sch_sfq.c 	q->quantum = psched_mtu(qdisc_dev(sch));
sch               762 net/sched/sch_sfq.c 		int err = sfq_change(sch, opt);
sch               782 net/sched/sch_sfq.c 		sch->flags |= TCQ_F_CAN_BYPASS;
sch               784 net/sched/sch_sfq.c 		sch->flags &= ~TCQ_F_CAN_BYPASS;
sch               788 net/sched/sch_sfq.c static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
sch               790 net/sched/sch_sfq.c 	struct sfq_sched_data *q = qdisc_priv(sch);
sch               825 net/sched/sch_sfq.c static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
sch               830 net/sched/sch_sfq.c static unsigned long sfq_find(struct Qdisc *sch, u32 classid)
sch               835 net/sched/sch_sfq.c static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
sch               845 net/sched/sch_sfq.c static struct tcf_block *sfq_tcf_block(struct Qdisc *sch, unsigned long cl,
sch               848 net/sched/sch_sfq.c 	struct sfq_sched_data *q = qdisc_priv(sch);
sch               855 net/sched/sch_sfq.c static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
sch               862 net/sched/sch_sfq.c static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
sch               865 net/sched/sch_sfq.c 	struct sfq_sched_data *q = qdisc_priv(sch);
sch               882 net/sched/sch_sfq.c static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
sch               884 net/sched/sch_sfq.c 	struct sfq_sched_data *q = qdisc_priv(sch);
sch               896 net/sched/sch_sfq.c 		if (arg->fn(sch, i + 1, arg) < 0) {
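
sfq_classify() hashes the flow tuple into one of a small number of slots, and the timer behind sfq_rehash() above changes the perturbation key so that colliding flows get re-shuffled. A self-contained sketch of the idea; the mixing function is a stand-in, not the kernel's hash:

#include <stdio.h>
#include <stdint.h>

#define SFQ_SLOTS 128

static unsigned sfq_slot(uint32_t saddr, uint32_t daddr, uint16_t sport,
                         uint16_t dport, uint32_t perturb)
{
    uint32_t h = saddr ^ daddr ^ ((uint32_t)sport << 16 | dport) ^ perturb;

    h ^= h >> 16;
    h *= 0x45d9f3b;                 /* cheap integer mix */
    h ^= h >> 16;
    return h % SFQ_SLOTS;
}

int main(void)
{
    /* same flow, two perturbation keys: usually lands in different slots */
    printf("slot before rehash: %u\n",
           sfq_slot(0x0a000001, 0x0a000002, 1234, 80, 0xdeadbeef));
    printf("slot after  rehash: %u\n",
           sfq_slot(0x0a000001, 0x0a000002, 1234, 80, 0xfeedface));
    return 0;
}
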
sch                68 net/sched/sch_skbprio.c static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch                72 net/sched/sch_skbprio.c 	struct skbprio_sched_data *q = qdisc_priv(sch);
sch                82 net/sched/sch_skbprio.c 	if (sch->q.qlen < sch->limit) {
sch                84 net/sched/sch_skbprio.c 		qdisc_qstats_backlog_inc(sch, skb);
sch                94 net/sched/sch_skbprio.c 		sch->q.qlen++;
sch               103 net/sched/sch_skbprio.c 		return qdisc_drop(skb, sch, to_free);
sch               107 net/sched/sch_skbprio.c 	qdisc_qstats_backlog_inc(sch, skb);
sch               114 net/sched/sch_skbprio.c 	qdisc_qstats_backlog_dec(sch, to_drop);
sch               115 net/sched/sch_skbprio.c 	qdisc_drop(to_drop, sch, to_free);
sch               125 net/sched/sch_skbprio.c 			BUG_ON(sch->q.qlen != 1);
sch               139 net/sched/sch_skbprio.c static struct sk_buff *skbprio_dequeue(struct Qdisc *sch)
sch               141 net/sched/sch_skbprio.c 	struct skbprio_sched_data *q = qdisc_priv(sch);
sch               148 net/sched/sch_skbprio.c 	sch->q.qlen--;
sch               149 net/sched/sch_skbprio.c 	qdisc_qstats_backlog_dec(sch, skb);
sch               150 net/sched/sch_skbprio.c 	qdisc_bstats_update(sch, skb);
sch               157 net/sched/sch_skbprio.c 			BUG_ON(sch->q.qlen);
sch               167 net/sched/sch_skbprio.c static int skbprio_change(struct Qdisc *sch, struct nlattr *opt,
sch               175 net/sched/sch_skbprio.c 	sch->limit = ctl->limit;
sch               179 net/sched/sch_skbprio.c static int skbprio_init(struct Qdisc *sch, struct nlattr *opt,
sch               182 net/sched/sch_skbprio.c 	struct skbprio_sched_data *q = qdisc_priv(sch);
sch               192 net/sched/sch_skbprio.c 	sch->limit = 64;
sch               196 net/sched/sch_skbprio.c 	return skbprio_change(sch, opt, extack);
sch               199 net/sched/sch_skbprio.c static int skbprio_dump(struct Qdisc *sch, struct sk_buff *skb)
sch               203 net/sched/sch_skbprio.c 	opt.limit = sch->limit;
sch               211 net/sched/sch_skbprio.c static void skbprio_reset(struct Qdisc *sch)
sch               213 net/sched/sch_skbprio.c 	struct skbprio_sched_data *q = qdisc_priv(sch);
sch               216 net/sched/sch_skbprio.c 	sch->qstats.backlog = 0;
sch               217 net/sched/sch_skbprio.c 	sch->q.qlen = 0;
sch               227 net/sched/sch_skbprio.c static void skbprio_destroy(struct Qdisc *sch)
sch               229 net/sched/sch_skbprio.c 	struct skbprio_sched_data *q = qdisc_priv(sch);
sch               236 net/sched/sch_skbprio.c static struct Qdisc *skbprio_leaf(struct Qdisc *sch, unsigned long arg)
sch               241 net/sched/sch_skbprio.c static unsigned long skbprio_find(struct Qdisc *sch, u32 classid)
sch               246 net/sched/sch_skbprio.c static int skbprio_dump_class(struct Qdisc *sch, unsigned long cl,
sch               253 net/sched/sch_skbprio.c static int skbprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
sch               256 net/sched/sch_skbprio.c 	struct skbprio_sched_data *q = qdisc_priv(sch);
sch               263 net/sched/sch_skbprio.c static void skbprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
sch               275 net/sched/sch_skbprio.c 		if (arg->fn(sch, i + 1, arg) < 0) {
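
skbprio_enqueue() above admits a packet into a full qdisc only by dropping one from the lowest occupied priority, and only when the newcomer outranks that priority. A counter-only model of the admission rule, with hypothetical names and higher index meaning more important:

#include <stdio.h>

#define PRIOS 64

struct skbp { int qlen[PRIOS]; int total, limit; };

static int skbp_enqueue(struct skbp *q, int prio)
{
    if (q->total < q->limit) {
        q->qlen[prio]++; q->total++;
        return 0;
    }

    int lowest = 0;
    while (lowest < PRIOS && q->qlen[lowest] == 0)
        lowest++;                   /* find the lowest occupied priority */
    if (prio <= lowest)
        return -1;                  /* newcomer no better: drop it */
    q->qlen[lowest]--;              /* evict one low-priority packet */
    q->qlen[prio]++;
    return 0;
}

int main(void)
{
    struct skbp q = { .limit = 2 };

    skbp_enqueue(&q, 3); skbp_enqueue(&q, 3);
    printf("enqueue prio 10 when full: %d\n", skbp_enqueue(&q, 10)); /* 0  */
    printf("enqueue prio 1 when full:  %d\n", skbp_enqueue(&q, 1));  /* -1 */
    return 0;
}
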
sch                79 net/sched/sch_taprio.c 	struct sk_buff *(*dequeue)(struct Qdisc *sch);
sch                80 net/sched/sch_taprio.c 	struct sk_buff *(*peek)(struct Qdisc *sch);
sch               185 net/sched/sch_taprio.c 						  struct Qdisc *sch,
sch               196 net/sched/sch_taprio.c 	struct taprio_sched *q = qdisc_priv(sch);
sch               197 net/sched/sch_taprio.c 	struct net_device *dev = qdisc_dev(sch);
sch               261 net/sched/sch_taprio.c static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
sch               263 net/sched/sch_taprio.c 	struct taprio_sched *q = qdisc_priv(sch);
sch               272 net/sched/sch_taprio.c 	entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp,
sch               342 net/sched/sch_taprio.c static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch)
sch               345 net/sched/sch_taprio.c 	struct taprio_sched *q = qdisc_priv(sch);
sch               376 net/sched/sch_taprio.c 		entry = find_entry_to_transmit(skb, sch, sched, admin,
sch               413 net/sched/sch_taprio.c static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch               416 net/sched/sch_taprio.c 	struct taprio_sched *q = qdisc_priv(sch);
sch               424 net/sched/sch_taprio.c 		return qdisc_drop(skb, sch, to_free);
sch               427 net/sched/sch_taprio.c 		if (!is_valid_interval(skb, sch))
sch               428 net/sched/sch_taprio.c 			return qdisc_drop(skb, sch, to_free);
sch               430 net/sched/sch_taprio.c 		skb->tstamp = get_packet_txtime(skb, sch);
sch               432 net/sched/sch_taprio.c 			return qdisc_drop(skb, sch, to_free);
sch               435 net/sched/sch_taprio.c 	qdisc_qstats_backlog_inc(sch, skb);
sch               436 net/sched/sch_taprio.c 	sch->q.qlen++;
sch               441 net/sched/sch_taprio.c static struct sk_buff *taprio_peek_soft(struct Qdisc *sch)
sch               443 net/sched/sch_taprio.c 	struct taprio_sched *q = qdisc_priv(sch);
sch               444 net/sched/sch_taprio.c 	struct net_device *dev = qdisc_dev(sch);
sch               485 net/sched/sch_taprio.c static struct sk_buff *taprio_peek_offload(struct Qdisc *sch)
sch               487 net/sched/sch_taprio.c 	struct taprio_sched *q = qdisc_priv(sch);
sch               488 net/sched/sch_taprio.c 	struct net_device *dev = qdisc_dev(sch);
sch               508 net/sched/sch_taprio.c static struct sk_buff *taprio_peek(struct Qdisc *sch)
sch               510 net/sched/sch_taprio.c 	struct taprio_sched *q = qdisc_priv(sch);
sch               512 net/sched/sch_taprio.c 	return q->peek(sch);
sch               522 net/sched/sch_taprio.c static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch)
sch               524 net/sched/sch_taprio.c 	struct taprio_sched *q = qdisc_priv(sch);
sch               525 net/sched/sch_taprio.c 	struct net_device *dev = qdisc_dev(sch);
sch               597 net/sched/sch_taprio.c 		qdisc_bstats_update(sch, skb);
sch               598 net/sched/sch_taprio.c 		qdisc_qstats_backlog_dec(sch, skb);
sch               599 net/sched/sch_taprio.c 		sch->q.qlen--;
sch               610 net/sched/sch_taprio.c static struct sk_buff *taprio_dequeue_offload(struct Qdisc *sch)
sch               612 net/sched/sch_taprio.c 	struct taprio_sched *q = qdisc_priv(sch);
sch               613 net/sched/sch_taprio.c 	struct net_device *dev = qdisc_dev(sch);
sch               627 net/sched/sch_taprio.c 		qdisc_bstats_update(sch, skb);
sch               628 net/sched/sch_taprio.c 		qdisc_qstats_backlog_dec(sch, skb);
sch               629 net/sched/sch_taprio.c 		sch->q.qlen--;
sch               637 net/sched/sch_taprio.c static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
sch               639 net/sched/sch_taprio.c 	struct taprio_sched *q = qdisc_priv(sch);
sch               641 net/sched/sch_taprio.c 	return q->dequeue(sch);
sch               696 net/sched/sch_taprio.c 	struct Qdisc *sch = q->root;
sch               753 net/sched/sch_taprio.c 	__netif_schedule(sch);
sch               969 net/sched/sch_taprio.c static int taprio_get_start_time(struct Qdisc *sch,
sch               973 net/sched/sch_taprio.c 	struct taprio_sched *q = qdisc_priv(sch);
sch              1022 net/sched/sch_taprio.c static void taprio_start_sched(struct Qdisc *sch,
sch              1025 net/sched/sch_taprio.c 	struct taprio_sched *q = qdisc_priv(sch);
sch              1284 net/sched/sch_taprio.c static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
sch              1287 net/sched/sch_taprio.c 	struct taprio_sched *q = qdisc_priv(sch);
sch              1288 net/sched/sch_taprio.c 	struct net_device *dev = qdisc_dev(sch);
sch              1406 net/sched/sch_taprio.c static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
sch              1411 net/sched/sch_taprio.c 	struct taprio_sched *q = qdisc_priv(sch);
sch              1412 net/sched/sch_taprio.c 	struct net_device *dev = qdisc_dev(sch);
sch              1469 net/sched/sch_taprio.c 	err = taprio_parse_clockid(sch, tb, extack);
sch              1496 net/sched/sch_taprio.c 	spin_lock_bh(qdisc_lock(sch));
sch              1526 net/sched/sch_taprio.c 	err = taprio_get_start_time(sch, new_admin, &start);
sch              1551 net/sched/sch_taprio.c 		taprio_start_sched(sch, start, new_admin);
sch              1567 net/sched/sch_taprio.c 	spin_unlock_bh(qdisc_lock(sch));
sch              1576 net/sched/sch_taprio.c static void taprio_destroy(struct Qdisc *sch)
sch              1578 net/sched/sch_taprio.c 	struct taprio_sched *q = qdisc_priv(sch);
sch              1579 net/sched/sch_taprio.c 	struct net_device *dev = qdisc_dev(sch);
sch              1607 net/sched/sch_taprio.c static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
sch              1610 net/sched/sch_taprio.c 	struct taprio_sched *q = qdisc_priv(sch);
sch              1611 net/sched/sch_taprio.c 	struct net_device *dev = qdisc_dev(sch);
sch              1622 net/sched/sch_taprio.c 	q->root = sch;
sch              1634 net/sched/sch_taprio.c 	if (sch->parent != TC_H_ROOT)
sch              1658 net/sched/sch_taprio.c 					  TC_H_MAKE(TC_H_MAJ(sch->handle),
sch              1670 net/sched/sch_taprio.c 	return taprio_change(sch, opt, extack);
sch              1673 net/sched/sch_taprio.c static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
sch              1676 net/sched/sch_taprio.c 	struct net_device *dev = qdisc_dev(sch);
sch              1685 net/sched/sch_taprio.c static int taprio_graft(struct Qdisc *sch, unsigned long cl,
sch              1689 net/sched/sch_taprio.c 	struct taprio_sched *q = qdisc_priv(sch);
sch              1690 net/sched/sch_taprio.c 	struct net_device *dev = qdisc_dev(sch);
sch              1691 net/sched/sch_taprio.c 	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
sch              1777 net/sched/sch_taprio.c static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
sch              1779 net/sched/sch_taprio.c 	struct taprio_sched *q = qdisc_priv(sch);
sch              1780 net/sched/sch_taprio.c 	struct net_device *dev = qdisc_dev(sch);
sch              1847 net/sched/sch_taprio.c static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
sch              1849 net/sched/sch_taprio.c 	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
sch              1857 net/sched/sch_taprio.c static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
sch              1861 net/sched/sch_taprio.c 	if (!taprio_queue_get(sch, ntx))
sch              1866 net/sched/sch_taprio.c static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
sch              1869 net/sched/sch_taprio.c 	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
sch              1878 net/sched/sch_taprio.c static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
sch              1883 net/sched/sch_taprio.c 	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
sch              1885 net/sched/sch_taprio.c 	sch = dev_queue->qdisc_sleeping;
sch              1886 net/sched/sch_taprio.c 	if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
sch              1887 net/sched/sch_taprio.c 	    qdisc_qstats_copy(d, sch) < 0)
sch              1892 net/sched/sch_taprio.c static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
sch              1894 net/sched/sch_taprio.c 	struct net_device *dev = qdisc_dev(sch);
sch              1902 net/sched/sch_taprio.c 		if (arg->fn(sch, ntx + 1, arg) < 0) {
sch              1910 net/sched/sch_taprio.c static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
sch              1913 net/sched/sch_taprio.c 	return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
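The taprio hits above end with a dispatch idiom worth noting: taprio_peek() and taprio_dequeue() in net/sched/sch_taprio.c only forward through per-instance function pointers (q->peek, q->dequeue), so the software path (taprio_dequeue_soft) and the full-offload path (taprio_dequeue_offload) share one set of Qdisc_ops while performing the same dequeue-side accounting. What follows is a minimal sketch of that pattern, not the kernel's actual code: struct demo_sched and the demo_* functions are hypothetical stand-ins, and the gate-aware selection logic is reduced to a single child qdisc.

#include <net/sch_generic.h>

/* Illustrative only: the mode-specific handler is stored once, at
 * configuration time, and every later dequeue goes through the stored
 * pointer, as taprio's q->dequeue/q->peek do. */
struct demo_sched {
	struct Qdisc *child;	/* simplified: one child instead of per-TXQ */
	struct sk_buff *(*dequeue)(struct Qdisc *sch);
	struct sk_buff *(*peek)(struct Qdisc *sch);
};

static struct sk_buff *demo_dequeue_soft(struct Qdisc *sch)
{
	struct demo_sched *q = qdisc_priv(sch);
	struct sk_buff *skb = q->child->ops->dequeue(q->child);

	if (skb) {
		/* The accounting triple visible in both taprio paths above. */
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	}
	return skb;
}

static struct sk_buff *demo_dequeue(struct Qdisc *sch)
{
	struct demo_sched *q = qdisc_priv(sch);

	return q->dequeue(sch);	/* demo_dequeue_soft or an offload variant */
}

Keeping the mode switch behind a pointer means taprio_change() can flip between software and offload behaviour at reconfiguration time without touching the registered Qdisc_ops.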
sch               143 net/sched/sch_tbf.c static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
sch               146 net/sched/sch_tbf.c 	struct tbf_sched_data *q = qdisc_priv(sch);
sch               155 net/sched/sch_tbf.c 		return qdisc_drop(skb, sch, to_free);
sch               166 net/sched/sch_tbf.c 				qdisc_qstats_drop(sch);
sch               172 net/sched/sch_tbf.c 	sch->q.qlen += nb;
sch               174 net/sched/sch_tbf.c 		qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
sch               179 net/sched/sch_tbf.c static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch               182 net/sched/sch_tbf.c 	struct tbf_sched_data *q = qdisc_priv(sch);
sch               189 net/sched/sch_tbf.c 			return tbf_segment(skb, sch, to_free);
sch               190 net/sched/sch_tbf.c 		return qdisc_drop(skb, sch, to_free);
sch               195 net/sched/sch_tbf.c 			qdisc_qstats_drop(sch);
sch               199 net/sched/sch_tbf.c 	sch->qstats.backlog += len;
sch               200 net/sched/sch_tbf.c 	sch->q.qlen++;
sch               209 net/sched/sch_tbf.c static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
sch               211 net/sched/sch_tbf.c 	struct tbf_sched_data *q = qdisc_priv(sch);
sch               244 net/sched/sch_tbf.c 			qdisc_qstats_backlog_dec(sch, skb);
sch               245 net/sched/sch_tbf.c 			sch->q.qlen--;
sch               246 net/sched/sch_tbf.c 			qdisc_bstats_update(sch, skb);
sch               264 net/sched/sch_tbf.c 		qdisc_qstats_overlimit(sch);
sch               269 net/sched/sch_tbf.c static void tbf_reset(struct Qdisc *sch)
sch               271 net/sched/sch_tbf.c 	struct tbf_sched_data *q = qdisc_priv(sch);
sch               274 net/sched/sch_tbf.c 	sch->qstats.backlog = 0;
sch               275 net/sched/sch_tbf.c 	sch->q.qlen = 0;
sch               292 net/sched/sch_tbf.c static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
sch               296 net/sched/sch_tbf.c 	struct tbf_sched_data *q = qdisc_priv(sch);
sch               362 net/sched/sch_tbf.c 	if (max_size < psched_mtu(qdisc_dev(sch)))
sch               364 net/sched/sch_tbf.c 				    max_size, qdisc_dev(sch)->name,
sch               365 net/sched/sch_tbf.c 				    psched_mtu(qdisc_dev(sch)));
sch               377 net/sched/sch_tbf.c 		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit,
sch               388 net/sched/sch_tbf.c 	sch_tree_lock(sch);
sch               410 net/sched/sch_tbf.c 	sch_tree_unlock(sch);
sch               416 net/sched/sch_tbf.c static int tbf_init(struct Qdisc *sch, struct nlattr *opt,
sch               419 net/sched/sch_tbf.c 	struct tbf_sched_data *q = qdisc_priv(sch);
sch               421 net/sched/sch_tbf.c 	qdisc_watchdog_init(&q->watchdog, sch);
sch               429 net/sched/sch_tbf.c 	return tbf_change(sch, opt, extack);
sch               432 net/sched/sch_tbf.c static void tbf_destroy(struct Qdisc *sch)
sch               434 net/sched/sch_tbf.c 	struct tbf_sched_data *q = qdisc_priv(sch);
sch               440 net/sched/sch_tbf.c static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
sch               442 net/sched/sch_tbf.c 	struct tbf_sched_data *q = qdisc_priv(sch);
sch               446 net/sched/sch_tbf.c 	sch->qstats.backlog = q->qdisc->qstats.backlog;
sch               478 net/sched/sch_tbf.c static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
sch               481 net/sched/sch_tbf.c 	struct tbf_sched_data *q = qdisc_priv(sch);
sch               489 net/sched/sch_tbf.c static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
sch               492 net/sched/sch_tbf.c 	struct tbf_sched_data *q = qdisc_priv(sch);
sch               497 net/sched/sch_tbf.c 	*old = qdisc_replace(sch, new, &q->qdisc);
sch               501 net/sched/sch_tbf.c static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
sch               503 net/sched/sch_tbf.c 	struct tbf_sched_data *q = qdisc_priv(sch);
sch               507 net/sched/sch_tbf.c static unsigned long tbf_find(struct Qdisc *sch, u32 classid)
sch               512 net/sched/sch_tbf.c static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
sch               516 net/sched/sch_tbf.c 			if (walker->fn(sch, 1, walker) < 0) {
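The tbf listing shows the standard bookkeeping contract for a classful shaper: tbf_enqueue() in net/sched/sch_tbf.c drops packets the bucket can never cover, forwards the rest to the child qdisc, and only on success increments sch->qstats.backlog and sch->q.qlen; tbf_dequeue() reverses both counters when a packet finally conforms and leaves. Below is a hedged sketch of that enqueue contract, with a hypothetical demo_sched_data standing in for tbf_sched_data:

#include <net/sch_generic.h>
#include <net/pkt_sched.h>

/* Hypothetical private data standing in for tbf_sched_data. */
struct demo_sched_data {
	u32 max_size;		/* largest packet a full bucket can cover */
	struct Qdisc *qdisc;	/* child qdisc (tbf defaults to a bfifo) */
};

static int demo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			struct sk_buff **to_free)
{
	struct demo_sched_data *q = qdisc_priv(sch);
	unsigned int len = qdisc_pkt_len(skb);
	int ret;

	/* Oversize packets can never conform, so drop them up front. */
	if (len > q->max_size)
		return qdisc_drop(skb, sch, to_free);

	ret = qdisc_enqueue(skb, q->qdisc, to_free);
	if (ret != NET_XMIT_SUCCESS) {
		/* Charge real drops to this qdisc, as tbf_enqueue() does. */
		if (net_xmit_drop_count(ret))
			qdisc_qstats_drop(sch);
		return ret;
	}

	/* Success: account the packet exactly once at this level. */
	sch->qstats.backlog += len;
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

The real tbf_enqueue() has one extra branch the sketch omits: a GSO packet larger than max_size is first split by tbf_segment(), which enqueues each segment individually and then corrects qlen and backlog through qdisc_tree_reduce_backlog(), as the tbf_segment() hits above show.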
sch                76 net/sched/sch_teql.c teql_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
sch                78 net/sched/sch_teql.c 	struct net_device *dev = qdisc_dev(sch);
sch                79 net/sched/sch_teql.c 	struct teql_sched_data *q = qdisc_priv(sch);
sch                86 net/sched/sch_teql.c 	return qdisc_drop(skb, sch, to_free);
sch                90 net/sched/sch_teql.c teql_dequeue(struct Qdisc *sch)
sch                92 net/sched/sch_teql.c 	struct teql_sched_data *dat = qdisc_priv(sch);
sch               104 net/sched/sch_teql.c 			dat->m->slaves = sch;
sch               108 net/sched/sch_teql.c 		qdisc_bstats_update(sch, skb);
sch               110 net/sched/sch_teql.c 	sch->q.qlen = dat->q.qlen + q->q.qlen;
sch               115 net/sched/sch_teql.c teql_peek(struct Qdisc *sch)
sch               122 net/sched/sch_teql.c teql_reset(struct Qdisc *sch)
sch               124 net/sched/sch_teql.c 	struct teql_sched_data *dat = qdisc_priv(sch);
sch               127 net/sched/sch_teql.c 	sch->q.qlen = 0;
sch               131 net/sched/sch_teql.c teql_destroy(struct Qdisc *sch)
sch               134 net/sched/sch_teql.c 	struct teql_sched_data *dat = qdisc_priv(sch);
sch               141 net/sched/sch_teql.c 			if (q == sch) {
sch               166 net/sched/sch_teql.c static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
sch               169 net/sched/sch_teql.c 	struct net_device *dev = qdisc_dev(sch);
sch               170 net/sched/sch_teql.c 	struct teql_master *m = (struct teql_master *)sch->ops;
sch               171 net/sched/sch_teql.c 	struct teql_sched_data *q = qdisc_priv(sch);
sch               204 net/sched/sch_teql.c 		NEXT_SLAVE(m->slaves) = sch;
sch               206 net/sched/sch_teql.c 		q->next = sch;
sch               207 net/sched/sch_teql.c 		m->slaves = sch;
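Finally, the teql hits trace a circular, singly-linked ring of slave qdiscs: m->slaves points at the most recently attached slave, and teql_qdisc_init() in net/sched/sch_teql.c either splices the new qdisc in behind the current tail or, for the very first slave, links it to itself. A simplified sketch of that splice follows, using hypothetical demo_link/demo_master types in place of the next pointer embedded in teql_sched_data:

/* Illustrative ring insertion matching the shape of teql_qdisc_init():
 * an empty ring becomes a single self-linked element; otherwise the new
 * element is inserted after the tail and becomes the new tail. */
struct demo_link {
	struct demo_link *next;
};

struct demo_master {
	struct demo_link *slaves;	/* tail of the ring, or NULL */
};

static void demo_add_slave(struct demo_master *m, struct demo_link *l)
{
	if (m->slaves) {
		l->next = m->slaves->next;	/* new element points at the head */
		m->slaves->next = l;		/* old tail points at the new element */
	} else {
		l->next = l;			/* first slave: a ring of one */
	}
	m->slaves = l;				/* the new element is now the tail */
}

The ring shape also explains the walk visible in teql_destroy() above: because the list is singly linked, unlinking a slave (the `if (q == sch)` test) requires iterating from m->slaves to find the predecessor before the master's pointer can be updated.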