qopt             1576 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
qopt             1578 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	u8 tc = mqprio_qopt->qopt.num_tc;
qopt             1580 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	u8 hw = mqprio_qopt->qopt.hw;
qopt             1686 drivers/net/ethernet/intel/i40e/i40e_main.c 	vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
qopt             1688 drivers/net/ethernet/intel/i40e/i40e_main.c 	num_qps = vsi->mqprio_qopt.qopt.count[0];
qopt             1698 drivers/net/ethernet/intel/i40e/i40e_main.c 	max_qcount = vsi->mqprio_qopt.qopt.count[0];
qopt             1702 drivers/net/ethernet/intel/i40e/i40e_main.c 			offset = vsi->mqprio_qopt.qopt.offset[i];
qopt             1703 drivers/net/ethernet/intel/i40e/i40e_main.c 			qcount = vsi->mqprio_qopt.qopt.count[i];
qopt             1745 drivers/net/ethernet/intel/i40e/i40e_main.c 	override_q = vsi->mqprio_qopt.qopt.count[0];
qopt             5093 drivers/net/ethernet/intel/i40e/i40e_main.c 	u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
qopt             5115 drivers/net/ethernet/intel/i40e/i40e_main.c 		return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
qopt             5245 drivers/net/ethernet/intel/i40e/i40e_main.c 	if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
qopt             5434 drivers/net/ethernet/intel/i40e/i40e_main.c 	if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
qopt             6830 drivers/net/ethernet/intel/i40e/i40e_main.c 	if (mqprio_qopt->qopt.offset[0] != 0 ||
qopt             6831 drivers/net/ethernet/intel/i40e/i40e_main.c 	    mqprio_qopt->qopt.num_tc < 1 ||
qopt             6832 drivers/net/ethernet/intel/i40e/i40e_main.c 	    mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
qopt             6835 drivers/net/ethernet/intel/i40e/i40e_main.c 		if (!mqprio_qopt->qopt.count[i])
qopt             6846 drivers/net/ethernet/intel/i40e/i40e_main.c 		if (i >= mqprio_qopt->qopt.num_tc - 1)
qopt             6848 drivers/net/ethernet/intel/i40e/i40e_main.c 		if (mqprio_qopt->qopt.offset[i + 1] !=
qopt             6849 drivers/net/ethernet/intel/i40e/i40e_main.c 		    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
qopt             6853 drivers/net/ethernet/intel/i40e/i40e_main.c 	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
qopt             7394 drivers/net/ethernet/intel/i40e/i40e_main.c 	num_tc = mqprio_qopt->qopt.num_tc;
qopt             7395 drivers/net/ethernet/intel/i40e/i40e_main.c 	hw = mqprio_qopt->qopt.hw;
qopt             2539 drivers/net/ethernet/intel/iavf/iavf_main.c 	if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||
qopt             2540 drivers/net/ethernet/intel/iavf/iavf_main.c 	    mqprio_qopt->qopt.num_tc < 1)
qopt             2543 drivers/net/ethernet/intel/iavf/iavf_main.c 	for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) {
qopt             2544 drivers/net/ethernet/intel/iavf/iavf_main.c 		if (!mqprio_qopt->qopt.count[i] ||
qopt             2545 drivers/net/ethernet/intel/iavf/iavf_main.c 		    mqprio_qopt->qopt.offset[i] != num_qps)
qopt             2556 drivers/net/ethernet/intel/iavf/iavf_main.c 		num_qps += mqprio_qopt->qopt.count[i];
qopt             2605 drivers/net/ethernet/intel/iavf/iavf_main.c 	num_tc = mqprio_qopt->qopt.num_tc;
qopt             2609 drivers/net/ethernet/intel/iavf/iavf_main.c 	if (!mqprio_qopt->qopt.hw) {
qopt             2646 drivers/net/ethernet/intel/iavf/iavf_main.c 					mqprio_qopt->qopt.count[i];
qopt             2648 drivers/net/ethernet/intel/iavf/iavf_main.c 					mqprio_qopt->qopt.offset[i];
qopt             2649 drivers/net/ethernet/intel/iavf/iavf_main.c 				total_qps += mqprio_qopt->qopt.count[i];
qopt             2669 drivers/net/ethernet/intel/iavf/iavf_main.c 			u16 qcount = mqprio_qopt->qopt.count[i];
qopt             2670 drivers/net/ethernet/intel/iavf/iavf_main.c 			u16 qoffset = mqprio_qopt->qopt.offset[i];
qopt             2555 drivers/net/ethernet/intel/igb/igb_main.c 			   struct tc_cbs_qopt_offload *qopt)
qopt             2565 drivers/net/ethernet/intel/igb/igb_main.c 	if (qopt->queue < 0 || qopt->queue > 1)
qopt             2568 drivers/net/ethernet/intel/igb/igb_main.c 	err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable,
qopt             2569 drivers/net/ethernet/intel/igb/igb_main.c 				  qopt->idleslope, qopt->sendslope,
qopt             2570 drivers/net/ethernet/intel/igb/igb_main.c 				  qopt->hicredit, qopt->locredit);
qopt             2574 drivers/net/ethernet/intel/igb/igb_main.c 	igb_offload_apply(adapter, qopt->queue);
qopt             2789 drivers/net/ethernet/intel/igb/igb_main.c 			      struct tc_etf_qopt_offload *qopt)
qopt             2799 drivers/net/ethernet/intel/igb/igb_main.c 	if (qopt->queue < 0 || qopt->queue > 1)
qopt             2802 drivers/net/ethernet/intel/igb/igb_main.c 	err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable);
qopt             2806 drivers/net/ethernet/intel/igb/igb_main.c 	igb_offload_apply(adapter, qopt->queue);
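The igb handlers above consume the two offload descriptors that the cbs and etf qdiscs build for ndo_setup_tc(). As a reference point, a sketch of their declarations in include/net/pkt_cls.h (recent kernels; field order and widths are worth re-checking against the tree being browsed):

struct tc_cbs_qopt_offload {
	u8 enable;		/* install (1) or tear down (0) the shaper */
	s32 queue;		/* tx queue index, cf. the qopt->queue range checks above */
	s32 hicredit;		/* credits in bytes */
	s32 locredit;
	s64 idleslope;		/* slopes in kilobits per second */
	s64 sendslope;
};

struct tc_etf_qopt_offload {
	u8 enable;
	s32 queue;
};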
qopt              523 drivers/net/ethernet/stmicro/stmmac/hwif.h 			 struct tc_cbs_qopt_offload *qopt);
qopt              310 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c 			struct tc_cbs_qopt_offload *qopt)
qopt              313 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c 	u32 queue = qopt->queue;
qopt              328 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c 	if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) {
qopt              334 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c 	} else if (!qopt->enable) {
qopt              343 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c 	value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
qopt              346 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c 	value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div);
qopt              349 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c 	value = qopt->hicredit * 1024ll * 8;
qopt              352 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c 	value = qopt->locredit * 1024ll * 8;
qopt              365 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c 			queue, qopt->sendslope, qopt->idleslope,
qopt              366 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c 			qopt->hicredit, qopt->locredit);
qopt             1532 drivers/net/ethernet/ti/cpsw.c 			struct tc_cbs_qopt_offload *qopt)
qopt             1541 drivers/net/ethernet/ti/cpsw.c 	tc = netdev_txq_to_tc(priv->ndev, qopt->queue);
qopt             1554 drivers/net/ethernet/ti/cpsw.c 	if (!qopt->enable && !priv->fifo_bw[fifo])
qopt             1578 drivers/net/ethernet/ti/cpsw.c 	bw = qopt->enable ? qopt->idleslope : 0;
qopt             2279 drivers/net/ethernet/ti/cpsw.c 	num_tc = mqprio->qopt.num_tc;
qopt             2294 drivers/net/ethernet/ti/cpsw.c 			tc = mqprio->qopt.prio_tc_map[i];
qopt             2301 drivers/net/ethernet/ti/cpsw.c 			count = mqprio->qopt.count[i];
qopt             2302 drivers/net/ethernet/ti/cpsw.c 			offset = mqprio->qopt.offset[i];
qopt             2307 drivers/net/ethernet/ti/cpsw.c 	if (!mqprio->qopt.hw) {
qopt             2313 drivers/net/ethernet/ti/cpsw.c 	priv->mqprio_hw = mqprio->qopt.hw;
qopt              672 include/net/pkt_cls.h 	struct tc_mqprio_qopt qopt;
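This single line is the embedding that every mqprio_qopt->qopt.* and mqprio.qopt.* access above relies on: struct tc_mqprio_qopt_offload in include/net/pkt_cls.h carries the UAPI struct tc_mqprio_qopt as its first member, so drivers see exactly what the user passed to the mqprio qdisc. A sketch of that UAPI layout, from include/uapi/linux/pkt_sched.h (verify against the tree in use):

struct tc_mqprio_qopt {
	__u8	num_tc;					/* number of traffic classes */
	__u8	prio_tc_map[TC_QOPT_BITMASK + 1];	/* 16 priorities -> TC */
	__u8	hw;					/* TC_MQPRIO_HW_OFFLOAD_* */
	__u16	count[TC_QOPT_MAX_QUEUE];		/* tx queues per TC */
	__u16	offset[TC_QOPT_MAX_QUEUE];		/* first tx queue of each TC */
};

The drivers in this listing (hns3, i40e, iavf, cpsw) only ever read num_tc, hw, prio_tc_map and the count/offset ranges; newer trees add mode/shaper/rate-limit members to the offload wrapper, none of which appear here.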
qopt              366 net/sched/sch_cbs.c 	struct tc_cbs_qopt *qopt;
qopt              379 net/sched/sch_cbs.c 	qopt = nla_data(tb[TCA_CBS_PARMS]);
qopt              381 net/sched/sch_cbs.c 	if (!qopt->offload) {
qopt              385 net/sched/sch_cbs.c 		err = cbs_enable_offload(dev, q, qopt, extack);
qopt              391 net/sched/sch_cbs.c 	q->hicredit = qopt->hicredit;
qopt              392 net/sched/sch_cbs.c 	q->locredit = qopt->locredit;
qopt              393 net/sched/sch_cbs.c 	q->idleslope = qopt->idleslope * BYTES_PER_KBIT;
qopt              394 net/sched/sch_cbs.c 	q->sendslope = qopt->sendslope * BYTES_PER_KBIT;
qopt              395 net/sched/sch_cbs.c 	q->offload = qopt->offload;
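The qopt parsed from TCA_CBS_PARMS above is the cbs UAPI struct; note that cbs_change() scales the slopes with BYTES_PER_KBIT for the software path while the offload path hands the raw values to the driver. Sketch from include/uapi/linux/pkt_sched.h:

struct tc_cbs_qopt {
	__u8	offload;	/* 0 = software shaping, 1 = request NIC offload */
	__u8	_pad[3];
	__s32	hicredit;	/* bytes */
	__s32	locredit;	/* bytes */
	__s32	idleslope;	/* kbit/s */
	__s32	sendslope;	/* kbit/s */
};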
qopt               44 net/sched/sch_etf.c static inline int validate_input_params(struct tc_etf_qopt *qopt,
qopt               57 net/sched/sch_etf.c 	if (qopt->clockid < 0) {
qopt               62 net/sched/sch_etf.c 	if (qopt->clockid != CLOCK_TAI) {
qopt               67 net/sched/sch_etf.c 	if (qopt->delta < 0) {
qopt              352 net/sched/sch_etf.c 	struct tc_etf_qopt *qopt;
qopt              371 net/sched/sch_etf.c 	qopt = nla_data(tb[TCA_ETF_PARMS]);
qopt              374 net/sched/sch_etf.c 		 qopt->delta, qopt->clockid,
qopt              375 net/sched/sch_etf.c 		 OFFLOAD_IS_ON(qopt) ? "on" : "off",
qopt              376 net/sched/sch_etf.c 		 DEADLINE_MODE_IS_ON(qopt) ? "on" : "off");
qopt              378 net/sched/sch_etf.c 	err = validate_input_params(qopt, extack);
qopt              384 net/sched/sch_etf.c 	if (OFFLOAD_IS_ON(qopt)) {
qopt              391 net/sched/sch_etf.c 	q->delta = qopt->delta;
qopt              392 net/sched/sch_etf.c 	q->clockid = qopt->clockid;
qopt              393 net/sched/sch_etf.c 	q->offload = OFFLOAD_IS_ON(qopt);
qopt              394 net/sched/sch_etf.c 	q->deadline_mode = DEADLINE_MODE_IS_ON(qopt);
qopt              395 net/sched/sch_etf.c 	q->skip_sock_check = SKIP_SOCK_CHECK_IS_SET(qopt);
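The etf qopt carries a clockid, a delta and a flags word; OFFLOAD_IS_ON(), DEADLINE_MODE_IS_ON() and SKIP_SOCK_CHECK_IS_SET() above test bits of that flags field. A sketch of the UAPI definition (include/uapi/linux/pkt_sched.h; exact bit positions worth re-checking):

struct tc_etf_qopt {
	__s32	delta;		/* scheduling headroom in ns; must not be negative (validated above) */
	__s32	clockid;	/* only CLOCK_TAI is accepted (validated above) */
	__u32	flags;
#define TC_ETF_DEADLINE_MODE_ON	_BITUL(0)
#define TC_ETF_OFFLOAD_ON	_BITUL(1)
#define TC_ETF_SKIP_SOCK_CHECK	_BITUL(2)
};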
qopt             1389 net/sched/sch_hfsc.c 	struct tc_hfsc_qopt *qopt;
qopt             1394 net/sched/sch_hfsc.c 	if (!opt || nla_len(opt) < sizeof(*qopt))
qopt             1396 net/sched/sch_hfsc.c 	qopt = nla_data(opt);
qopt             1398 net/sched/sch_hfsc.c 	q->defcls = qopt->defcls;
qopt             1431 net/sched/sch_hfsc.c 	struct tc_hfsc_qopt *qopt;
qopt             1433 net/sched/sch_hfsc.c 	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
qopt             1435 net/sched/sch_hfsc.c 	qopt = nla_data(opt);
qopt             1438 net/sched/sch_hfsc.c 	q->defcls = qopt->defcls;
qopt             1519 net/sched/sch_hfsc.c 	struct tc_hfsc_qopt qopt;
qopt             1521 net/sched/sch_hfsc.c 	qopt.defcls = q->defcls;
qopt             1522 net/sched/sch_hfsc.c 	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
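The hfsc qopt is minimal: the only field hfsc_init_qdisc()/hfsc_change_qdisc() copy out of it, and hfsc_dump_qdisc() puts back, is the default class id. Sketch of the UAPI struct:

struct tc_hfsc_qopt {
	__u16	defcls;		/* minor handle of the default class */
};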
qopt               62 net/sched/sch_mqprio.c static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
qopt               67 net/sched/sch_mqprio.c 	if (qopt->num_tc > TC_MAX_QUEUE)
qopt               72 net/sched/sch_mqprio.c 		if (qopt->prio_tc_map[i] >= qopt->num_tc)
qopt               80 net/sched/sch_mqprio.c 	if (qopt->hw > TC_MQPRIO_HW_OFFLOAD_MAX)
qopt               81 net/sched/sch_mqprio.c 		qopt->hw = TC_MQPRIO_HW_OFFLOAD_MAX;
qopt               88 net/sched/sch_mqprio.c 	if (qopt->hw)
qopt               91 net/sched/sch_mqprio.c 	for (i = 0; i < qopt->num_tc; i++) {
qopt               92 net/sched/sch_mqprio.c 		unsigned int last = qopt->offset[i] + qopt->count[i];
qopt               97 net/sched/sch_mqprio.c 		if (qopt->offset[i] >= dev->real_num_tx_queues ||
qopt               98 net/sched/sch_mqprio.c 		    !qopt->count[i] ||
qopt              103 net/sched/sch_mqprio.c 		for (j = i + 1; j < qopt->num_tc; j++) {
qopt              104 net/sched/sch_mqprio.c 			if (last > qopt->offset[j])
qopt              141 net/sched/sch_mqprio.c 	struct tc_mqprio_qopt *qopt = NULL;
qopt              160 net/sched/sch_mqprio.c 	if (!opt || nla_len(opt) < sizeof(*qopt))
qopt              163 net/sched/sch_mqprio.c 	qopt = nla_data(opt);
qopt              164 net/sched/sch_mqprio.c 	if (mqprio_parse_opt(dev, qopt))
qopt              167 net/sched/sch_mqprio.c 	len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt));
qopt              170 net/sched/sch_mqprio.c 				 sizeof(*qopt));
qopt              174 net/sched/sch_mqprio.c 		if (!qopt->hw)
qopt              195 net/sched/sch_mqprio.c 				if (i >= qopt->num_tc)
qopt              211 net/sched/sch_mqprio.c 				if (i >= qopt->num_tc)
qopt              243 net/sched/sch_mqprio.c 	if (qopt->hw) {
qopt              244 net/sched/sch_mqprio.c 		struct tc_mqprio_qopt_offload mqprio = {.qopt = *qopt};
qopt              258 net/sched/sch_mqprio.c 				for (i = 0; i < mqprio.qopt.num_tc; i++)
qopt              261 net/sched/sch_mqprio.c 				for (i = 0; i < mqprio.qopt.num_tc; i++)
qopt              273 net/sched/sch_mqprio.c 		priv->hw_offload = mqprio.qopt.hw;
qopt              275 net/sched/sch_mqprio.c 		netdev_set_num_tc(dev, qopt->num_tc);
qopt              276 net/sched/sch_mqprio.c 		for (i = 0; i < qopt->num_tc; i++)
qopt              278 net/sched/sch_mqprio.c 					    qopt->count[i], qopt->offset[i]);
qopt              283 net/sched/sch_mqprio.c 		netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);
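When qopt->hw is set, mqprio_init() above wraps the parsed qopt in a struct tc_mqprio_qopt_offload and hands it to the driver via ndo_setup_tc(); when it is clear, the qdisc programs the mapping itself with netdev_set_num_tc()/netdev_set_tc_queue(). A minimal, hypothetical driver-side handler (the foo_* names are made up) would consume the embedded qopt roughly like the drivers listed above do:

/* Hypothetical TC_SETUP_QDISC_MQPRIO handler; foo_reset_tc() is invented. */
static int foo_setup_tc_mqprio(struct net_device *dev,
			       struct tc_mqprio_qopt_offload *mqprio)
{
	u8 num_tc = mqprio->qopt.num_tc;
	int i;

	if (!mqprio->qopt.hw)		/* offload being torn down */
		return foo_reset_tc(dev);

	netdev_set_num_tc(dev, num_tc);
	for (i = 0; i < num_tc; i++)
		netdev_set_tc_queue(dev, i, mqprio->qopt.count[i],
				    mqprio->qopt.offset[i]);
	return 0;
}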
qopt              176 net/sched/sch_multiq.c 	struct tc_multiq_qopt *qopt;
qopt              182 net/sched/sch_multiq.c 	if (nla_len(opt) < sizeof(*qopt))
qopt              185 net/sched/sch_multiq.c 	qopt = nla_data(opt);
qopt              187 net/sched/sch_multiq.c 	qopt->bands = qdisc_dev(sch)->real_num_tx_queues;
qopt              195 net/sched/sch_multiq.c 	q->bands = qopt->bands;
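multiq ignores any user-supplied band count and overwrites qopt->bands with the device's real_num_tx_queues (line 187 above). The UAPI struct, for reference:

struct tc_multiq_qopt {
	__u16	bands;		/* number of bands; forced to real_num_tx_queues */
	__u16	max_bands;	/* maximum number of queues */
};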
qopt              955 net/sched/sch_netem.c 	struct tc_netem_qopt *qopt;
qopt              963 net/sched/sch_netem.c 	qopt = nla_data(opt);
qopt              964 net/sched/sch_netem.c 	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
qopt              996 net/sched/sch_netem.c 	sch->limit = qopt->limit;
qopt              998 net/sched/sch_netem.c 	q->latency = PSCHED_TICKS2NS(qopt->latency);
qopt              999 net/sched/sch_netem.c 	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
qopt             1000 net/sched/sch_netem.c 	q->limit = qopt->limit;
qopt             1001 net/sched/sch_netem.c 	q->gap = qopt->gap;
qopt             1003 net/sched/sch_netem.c 	q->loss = qopt->loss;
qopt             1004 net/sched/sch_netem.c 	q->duplicate = qopt->duplicate;
qopt             1135 net/sched/sch_netem.c 	struct tc_netem_qopt qopt;
qopt             1142 net/sched/sch_netem.c 	qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency),
qopt             1144 net/sched/sch_netem.c 	qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter),
qopt             1146 net/sched/sch_netem.c 	qopt.limit = q->limit;
qopt             1147 net/sched/sch_netem.c 	qopt.loss = q->loss;
qopt             1148 net/sched/sch_netem.c 	qopt.gap = q->gap;
qopt             1149 net/sched/sch_netem.c 	qopt.duplicate = q->duplicate;
qopt             1150 net/sched/sch_netem.c 	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
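netem parses and dumps the same fixed-size qopt; latency and jitter travel as psched ticks and are converted to and from nanoseconds with PSCHED_TICKS2NS()/PSCHED_NS2TICKS() above. Sketch of the UAPI struct:

struct tc_netem_qopt {
	__u32	latency;	/* added delay, psched ticks on the wire */
	__u32	limit;		/* fifo limit (packets) */
	__u32	loss;		/* random packet loss (0 = none, ~0 = 100%) */
	__u32	gap;		/* re-ordering gap (0 for none) */
	__u32	duplicate;	/* random duplication (0 = none, ~0 = 100%) */
	__u32	jitter;		/* random jitter, psched ticks on the wire */
};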
qopt              142 net/sched/sch_prio.c static int prio_offload(struct Qdisc *sch, struct tc_prio_qopt *qopt)
qopt              153 net/sched/sch_prio.c 	if (qopt) {
qopt              155 net/sched/sch_prio.c 		opt.replace_params.bands = qopt->bands;
qopt              156 net/sched/sch_prio.c 		memcpy(&opt.replace_params.priomap, qopt->priomap,
qopt              184 net/sched/sch_prio.c 	struct tc_prio_qopt *qopt;
qopt              186 net/sched/sch_prio.c 	if (nla_len(opt) < sizeof(*qopt))
qopt              188 net/sched/sch_prio.c 	qopt = nla_data(opt);
qopt              190 net/sched/sch_prio.c 	if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
qopt              194 net/sched/sch_prio.c 		if (qopt->priomap[i] >= qopt->bands)
qopt              199 net/sched/sch_prio.c 	for (i = oldbands; i < qopt->bands; i++) {
qopt              210 net/sched/sch_prio.c 	prio_offload(sch, qopt);
qopt              212 net/sched/sch_prio.c 	q->bands = qopt->bands;
qopt              213 net/sched/sch_prio.c 	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
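prio copies the band count and the 16-entry priomap straight out of the qopt, and prio_offload() above mirrors the same two fields to offloading drivers. UAPI sketch:

struct tc_prio_qopt {
	int	bands;				/* 2..TCQ_PRIO_BANDS (validated above) */
	__u8	priomap[TC_PRIO_MAX + 1];	/* skb->priority -> band */
};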
qopt              904 net/sched/sch_taprio.c 				   struct tc_mqprio_qopt *qopt,
qopt              910 net/sched/sch_taprio.c 	if (!qopt && !dev->num_tc) {
qopt              922 net/sched/sch_taprio.c 	if (qopt->num_tc > TC_MAX_QUEUE) {
qopt              928 net/sched/sch_taprio.c 	if (qopt->num_tc > dev->num_tx_queues) {
qopt              935 net/sched/sch_taprio.c 		if (qopt->prio_tc_map[i] >= qopt->num_tc) {
qopt              941 net/sched/sch_taprio.c 	for (i = 0; i < qopt->num_tc; i++) {
qopt              942 net/sched/sch_taprio.c 		unsigned int last = qopt->offset[i] + qopt->count[i];
qopt              947 net/sched/sch_taprio.c 		if (qopt->offset[i] >= dev->num_tx_queues ||
qopt              948 net/sched/sch_taprio.c 		    !qopt->count[i] ||
qopt              958 net/sched/sch_taprio.c 		for (j = i + 1; j < qopt->num_tc; j++) {
qopt              959 net/sched/sch_taprio.c 			if (last > qopt->offset[j]) {
qopt              298 net/sched/sch_tbf.c 	struct tc_tbf_qopt *qopt;
qopt              315 net/sched/sch_tbf.c 	qopt = nla_data(tb[TCA_TBF_PARMS]);
qopt              316 net/sched/sch_tbf.c 	if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
qopt              317 net/sched/sch_tbf.c 		qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
qopt              321 net/sched/sch_tbf.c 	if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
qopt              322 net/sched/sch_tbf.c 			qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
qopt              326 net/sched/sch_tbf.c 	buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
qopt              327 net/sched/sch_tbf.c 	mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);
qopt              331 net/sched/sch_tbf.c 	psched_ratecfg_precompute(&rate, &qopt->rate, rate64);
qopt              340 net/sched/sch_tbf.c 	if (qopt->peakrate.rate) {
qopt              343 net/sched/sch_tbf.c 		psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
qopt              373 net/sched/sch_tbf.c 		err = fifo_set_limit(q->qdisc, qopt->limit);
qopt              376 net/sched/sch_tbf.c 	} else if (qopt->limit > 0) {
qopt              377 net/sched/sch_tbf.c 		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit,
qopt              394 net/sched/sch_tbf.c 	q->limit = qopt->limit;
qopt              398 net/sched/sch_tbf.c 		q->mtu = PSCHED_TICKS2NS(qopt->mtu);
qopt              403 net/sched/sch_tbf.c 		q->buffer = PSCHED_TICKS2NS(qopt->buffer);
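tbf only keeps the legacy rate tables for backward compatibility (the qdisc_put_rtab(qdisc_get_rtab(...)) calls above) and otherwise feeds qopt->rate and qopt->peakrate into psched_ratecfg_precompute(); buffer and mtu arrive as psched ticks and are converted with PSCHED_TICKS2NS(). Sketch of the UAPI struct:

struct tc_tbf_qopt {
	struct tc_ratespec	rate;		/* token rate */
	struct tc_ratespec	peakrate;	/* peak rate; its rate field is 0 when unused */
	__u32			limit;		/* queue limit, bytes */
	__u32			buffer;		/* burst, psched ticks */
	__u32			mtu;		/* minburst/mtu, psched ticks */
};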