qs                 22 arch/s390/include/uapi/asm/runtime_instr.h 	__u32 qs		: 1;
qs               1255 arch/s390/kernel/ptrace.c 		cb->qs == 0 &&
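
The two s390 hits above pair the `qs` bit in the runtime-instrumentation control block with the ptrace check that rejects a user-supplied block unless `cb->qs == 0`. A minimal userspace sketch of that validate-the-reserved-bit pattern; the struct layout here is illustrative, not the real s390 ABI:

	/* Hypothetical model: a one-bit control flag that must be clear in
	 * user-supplied state, as ptrace.c requires of cb->qs. */
	#include <stdbool.h>
	#include <stdio.h>

	struct ri_cb_model {
		unsigned int qs : 1;	/* must be 0 when written from user space */
		unsigned int v  : 1;	/* valid bit */
	};

	static bool cb_is_valid(const struct ri_cb_model *cb)
	{
		return cb->v == 1 && cb->qs == 0;
	}

	int main(void)
	{
		struct ri_cb_model good = { .qs = 0, .v = 1 };
		struct ri_cb_model bad  = { .qs = 1, .v = 1 };

		printf("good=%d bad=%d\n", cb_is_valid(&good), cb_is_valid(&bad));
		return 0;
	}
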
qs                257 drivers/md/dm-cache-policy-smq.c 	struct ilist qs[MAX_LEVELS];
qs                278 drivers/md/dm-cache-policy-smq.c 		l_init(q->qs + i);
qs                302 drivers/md/dm-cache-policy-smq.c 	l_add_tail(q->es, q->qs + e->level, e);
qs                312 drivers/md/dm-cache-policy-smq.c 	l_add_head(q->es, q->qs + e->level, e);
qs                322 drivers/md/dm-cache-policy-smq.c 	l_add_before(q->es, q->qs + e->level, old, e);
qs                327 drivers/md/dm-cache-policy-smq.c 	l_del(q->es, q->qs + e->level, e);
qs                343 drivers/md/dm-cache-policy-smq.c 		for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) {
qs                377 drivers/md/dm-cache-policy-smq.c 		for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e))
qs                379 drivers/md/dm-cache-policy-smq.c 				l_del(q->es, q->qs + e->level, e);
qs                436 drivers/md/dm-cache-policy-smq.c 		l = q->qs + level;
qs                456 drivers/md/dm-cache-policy-smq.c 		l_above = q->qs + level + 1u;
qs                479 drivers/md/dm-cache-policy-smq.c 		for (de = l_head(q->es, q->qs + new_level); de && de->sentinel; de = l_next(q->es, de))
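
In dm-cache-policy-smq, `qs` is an array of intrusive lists indexed by an entry's level, and every list operation above addresses `q->qs + e->level`. A simplified, self-contained sketch of that multi-level queue shape, using plain pointers instead of the driver's indexed entry-space scheme:

	/* Sketch: one list head per level; an entry lands on the list
	 * selected by its own level field. */
	#include <stddef.h>
	#include <stdio.h>

	#define MAX_LEVELS 4

	struct entry {
		unsigned level;
		struct entry *prev, *next;
	};

	struct ilist {
		struct entry *head, *tail;
	};

	struct queue {
		struct ilist qs[MAX_LEVELS];
	};

	static void q_init(struct queue *q)
	{
		for (unsigned i = 0; i < MAX_LEVELS; i++)
			q->qs[i].head = q->qs[i].tail = NULL;
	}

	static void q_push(struct queue *q, struct entry *e)
	{
		struct ilist *l = q->qs + e->level;	/* pick list by level */

		e->prev = l->tail;
		e->next = NULL;
		if (l->tail)
			l->tail->next = e;
		else
			l->head = e;
		l->tail = e;
	}

	int main(void)
	{
		struct queue q;
		struct entry a = { .level = 1 }, b = { .level = 1 };

		q_init(&q);
		q_push(&q, &a);
		q_push(&q, &b);
		printf("level-1 head is %s\n", q.qs[1].head == &a ? "a" : "b");
		return 0;
	}
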
qs                494 drivers/net/ethernet/brocade/bna/bfi_enet.h 		struct bfi_enet_rxq	qs;	/* small/header buffers */
qs               1635 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 			bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
qs               1637 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 			cfg_req->q_cfg[i].qs.rx_buffer_size =
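
Here `qs` is the small/header-buffer receive queue that sits beside the large-buffer queue in the bna rx config request. A rough model of that pairing, with invented field names rather than the firmware ABI:

	/* Illustrative only: paired large (ql) and small (qs) rx queues. */
	#include <stdio.h>

	struct rxq_cfg { unsigned ring_sz; };

	struct q_cfg_model {
		struct rxq_cfg ql;		/* large/data buffers */
		struct rxq_cfg qs;		/* small/header buffers */
		unsigned rx_buffer_size;	/* size used for the small queue */
	};

	int main(void)
	{
		struct q_cfg_model cfg = {
			.ql = { .ring_sz = 512 },
			.qs = { .ring_sz = 512 },
			.rx_buffer_size = 128,
		};

		printf("small ring=%u buf=%u\n", cfg.qs.ring_sz, cfg.rx_buffer_size);
		return 0;
	}
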
qs                283 drivers/net/ethernet/cavium/thunder/nic.h 	struct queue_set	*qs;
qs                587 drivers/net/ethernet/cavium/thunder/nic.h 	struct qs_cfg_msg	qs;
qs                981 drivers/net/ethernet/cavium/thunder/nic_main.c 			   (mbx.qs.num << NIC_QS_ID_SHIFT);
qs                982 drivers/net/ethernet/cavium/thunder/nic_main.c 		cfg = mbx.qs.cfg;
qs               1026 drivers/net/ethernet/cavium/thunder/nic_main.c 		nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq);
qs                218 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 	for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
qs                226 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 	for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
qs                283 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		       (nic->qs->rq_cnt + nic->qs->sq_cnt);
qs                291 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 				(snic->qs->rq_cnt + snic->qs->sq_cnt);
qs                307 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 	for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
qs                310 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 			*((*data)++) = ((u64 *)&nic->qs->rq[qidx].stats)
qs                314 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 	for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
qs                317 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 			*((*data)++) = ((u64 *)&nic->qs->sq[qidx].stats)
qs                472 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 	struct queue_set *qs = nic->qs;
qs                475 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 	ring->rx_pending = qs->cq_len;
qs                477 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 	ring->tx_pending = qs->sq_len;
qs                484 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 	struct queue_set *qs = nic->qs;
qs                499 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 	if ((tx_count == qs->sq_len) && (rx_count == qs->cq_len))
qs                503 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 	qs->sq_len = rounddown_pow_of_two(tx_count);
qs                504 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 	qs->cq_len = rounddown_pow_of_two(rx_count);
qs                761 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 	nic->qs->rq_cnt = min_t(u8, nic->rx_queues, MAX_RCV_QUEUES_PER_QS);
qs                762 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 	nic->qs->sq_cnt = min_t(u8, txq_count, MAX_SND_QUEUES_PER_QS);
qs                763 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 	nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);
qs                321 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;
qs                443 drivers/net/ethernet/cavium/thunder/nicvf_main.c 			nic->snicvf[sqs]->qs->rq_cnt = MAX_RCV_QUEUES_PER_QS;
qs                446 drivers/net/ethernet/cavium/thunder/nicvf_main.c 			nic->snicvf[sqs]->qs->rq_cnt = rx_queues;
qs                451 drivers/net/ethernet/cavium/thunder/nicvf_main.c 			nic->snicvf[sqs]->qs->sq_cnt = MAX_SND_QUEUES_PER_QS;
qs                454 drivers/net/ethernet/cavium/thunder/nicvf_main.c 			nic->snicvf[sqs]->qs->sq_cnt = tx_queues;
qs                458 drivers/net/ethernet/cavium/thunder/nicvf_main.c 		nic->snicvf[sqs]->qs->cq_cnt =
qs                459 drivers/net/ethernet/cavium/thunder/nicvf_main.c 		max(nic->snicvf[sqs]->qs->rq_cnt, nic->snicvf[sqs]->qs->sq_cnt);
qs                670 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	sq = &nic->qs->sq[cqe_tx->sq_idx];
qs                855 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	struct queue_set *qs = nic->qs;
qs                856 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	struct cmp_queue *cq = &qs->cq[cq_idx];
qs                859 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	struct snd_queue *sq = &qs->sq[cq_idx];
qs                860 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	struct rcv_queue *rq = &qs->rq[cq_idx];
qs                991 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	struct queue_set *qs = nic->qs;
qs                998 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
qs               1007 drivers/net/ethernet/cavium/thunder/nicvf_main.c 		nicvf_cmp_queue_config(nic, qs, qidx, true);
qs               1008 drivers/net/ethernet/cavium/thunder/nicvf_main.c 		nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx);
qs               1009 drivers/net/ethernet/cavium/thunder/nicvf_main.c 		nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
qs               1071 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	for (qidx = 0; qidx < nic->qs->rbdr_cnt; qidx++) {
qs               1141 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	for (irq = 0; irq < nic->qs->cq_cnt; irq++) {
qs               1152 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	     irq < (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt); irq++) {
qs               1291 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	sq = &snic->qs->sq[qid];
qs               1318 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
qs               1331 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	struct queue_set *qs = nic->qs;
qs               1359 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
qs               1375 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
qs               1456 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	struct queue_set *qs = nic->qs;
qs               1470 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
qs               1549 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
qs               1553 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
qs               1573 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
qs               1668 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	struct queue_set *qs = nic->qs;
qs               1719 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	for (qidx = 0; qidx < qs->rq_cnt; qidx++)
qs               1721 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
qs               1830 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	nic->qs->rq_cnt = min_t(u8, nic->rx_queues, MAX_RCV_QUEUES_PER_QS);
qs               1831 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	nic->qs->sq_cnt = min_t(u8, txq_count, MAX_SND_QUEUES_PER_QS);
qs               1832 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	nic->qs->cq_cnt = max_t(u8, nic->qs->rq_cnt, nic->qs->sq_cnt);
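
The ThunderX hits repeat one sizing rule in both the ethtool and probe paths above: clamp the receive and send queue counts to the per-queue-set hardware maxima, then size the completion-queue count to cover whichever is larger, since each RQ/SQ pair reports into a CQ. A standalone sketch of that rule; the MAX_* values here are illustrative:

	#include <stdio.h>

	#define MAX_RCV_QUEUES_PER_QS 8		/* illustrative limits */
	#define MAX_SND_QUEUES_PER_QS 8

	static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }
	static unsigned max_u(unsigned a, unsigned b) { return a > b ? a : b; }

	int main(void)
	{
		unsigned rx_queues = 12, tx_queues = 4;
		unsigned rq_cnt = min_u(rx_queues, MAX_RCV_QUEUES_PER_QS);
		unsigned sq_cnt = min_u(tx_queues, MAX_SND_QUEUES_PER_QS);
		unsigned cq_cnt = max_u(rq_cnt, sq_cnt);	/* one CQ per RQ/SQ */

		printf("rq=%u sq=%u cq=%u\n", rq_cnt, sq_cnt, cq_cnt);
		return 0;
	}
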
qs                381 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	struct queue_set *qs = nic->qs;
qs                382 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	int rbdr_idx = qs->rbdr_cnt;
qs                394 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rbdr = &qs->rbdr[rbdr_idx];
qs                403 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (qcount >= (qs->rbdr_len - 1))
qs                406 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		refill_rb_cnt = qs->rbdr_len - qcount - 1;
qs                627 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 				    struct queue_set *qs, int qidx)
qs                639 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 				    struct queue_set *qs, int qidx)
qs                649 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 				    struct queue_set *qs, int qidx)
qs                744 drivers/net/ethernet/cavium/thunder/nicvf_queues.c static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
qs                751 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rq = &qs->rq[qidx];
qs                758 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		nicvf_reclaim_rcv_queue(nic, qs, qidx);
qs                763 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rq->cq_qs = qs->vnic_id;
qs                765 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rq->start_rbdr_qs = qs->vnic_id;
qs                766 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
qs                767 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rq->cont_rbdr_qs = qs->vnic_id;
qs                768 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
qs                777 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	mbx.rq.qs_num = qs->vnic_id;
qs                788 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		     (qs->vnic_id << 0);
qs                817 drivers/net/ethernet/cavium/thunder/nicvf_queues.c void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
qs                823 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	cq = &qs->cq[qidx];
qs                827 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		nicvf_reclaim_cmp_queue(nic, qs, qidx);
qs                847 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	cq_cfg.qsize = ilog2(qs->cq_len >> 10);
qs                858 drivers/net/ethernet/cavium/thunder/nicvf_queues.c static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
qs                865 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	sq = &qs->sq[qidx];
qs                869 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		nicvf_reclaim_snd_queue(nic, qs, qidx);
qs                876 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	sq->cq_qs = qs->vnic_id;
qs                881 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	mbx.sq.qs_num = qs->vnic_id;
qs                896 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	sq_cfg.qsize = ilog2(qs->sq_len >> 10);
qs                901 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	sq_cfg.cq_limit = (CMP_QUEUE_PIPELINE_RSVD * 256) / qs->cq_len;
qs                916 drivers/net/ethernet/cavium/thunder/nicvf_queues.c static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
qs                922 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rbdr = &qs->rbdr[qidx];
qs                945 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			      qidx, qs->rbdr_len - 1);
qs                956 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	struct queue_set *qs = nic->qs;
qs                959 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (!qs) {
qs                965 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	qs->enable = enable;
qs                966 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	qs->vnic_id = nic->vf_id;
qs                969 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
qs                970 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	mbx.qs.num = qs->vnic_id;
qs                971 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	mbx.qs.sqs_count = nic->sqs_count;
qs                973 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	mbx.qs.cfg = 0;
qs                974 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
qs                975 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (qs->enable) {
qs                980 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		qs_cfg->vnic = qs->vnic_id;
qs                991 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	struct queue_set *qs = nic->qs;
qs                994 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
qs                995 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
qs                998 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
qs                999 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
qs               1002 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
qs               1003 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		nicvf_free_snd_queue(nic, &qs->sq[qidx]);
qs               1009 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	struct queue_set *qs = nic->qs;
qs               1012 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
qs               1013 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
qs               1019 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
qs               1020 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx))
qs               1025 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
qs               1026 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
qs               1038 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	struct queue_set *qs;
qs               1040 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
qs               1041 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (!qs)
qs               1043 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	nic->qs = qs;
qs               1046 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	qs->rbdr_cnt = DEFAULT_RBDR_CNT;
qs               1047 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	qs->rq_cnt = min_t(u8, MAX_RCV_QUEUES_PER_QS, num_online_cpus());
qs               1048 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	qs->sq_cnt = min_t(u8, MAX_SND_QUEUES_PER_QS, num_online_cpus());
qs               1049 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	qs->cq_cnt = max_t(u8, qs->rq_cnt, qs->sq_cnt);
qs               1052 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	qs->rbdr_len = RCV_BUF_COUNT;
qs               1053 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	qs->sq_len = SND_QUEUE_LEN;
qs               1054 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	qs->cq_len = CMP_QUEUE_LEN;
qs               1056 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	nic->rx_queues = qs->rq_cnt;
qs               1057 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	nic->tx_queues = qs->sq_cnt;
qs               1066 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	struct queue_set *qs = nic->qs;
qs               1067 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	struct queue_set *pqs = nic->pnicvf->qs;
qs               1070 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (!qs)
qs               1078 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		qs->cq_len = pqs->cq_len;
qs               1079 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		qs->sq_len = pqs->sq_len;
qs               1086 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
qs               1087 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			nicvf_snd_queue_config(nic, qs, qidx, enable);
qs               1088 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
qs               1089 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			nicvf_cmp_queue_config(nic, qs, qidx, enable);
qs               1090 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
qs               1091 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			nicvf_rbdr_config(nic, qs, qidx, enable);
qs               1092 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
qs               1093 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			nicvf_rcv_queue_config(nic, qs, qidx, enable);
qs               1095 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
qs               1096 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			nicvf_rcv_queue_config(nic, qs, qidx, disable);
qs               1097 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
qs               1098 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			nicvf_rbdr_config(nic, qs, qidx, disable);
qs               1099 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
qs               1100 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			nicvf_snd_queue_config(nic, qs, qidx, disable);
qs               1101 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
qs               1102 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			nicvf_cmp_queue_config(nic, qs, qidx, disable);
qs               1819 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rq = &nic->qs->rq[rq_idx];
qs               1832 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	sq = &nic->qs->sq[sq_idx];
qs                335 drivers/net/ethernet/cavium/thunder/nicvf_queues.h void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
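
The loops in nicvf_config_data_transfer() above encode an ordering invariant: on enable, send queues come up first, then completion queues, RBDRs, and finally receive queues; on disable, receive queues and RBDRs are quiesced before the send and completion queues. A sketch of that shape, with stub configure functions standing in for the real nicvf_*_queue_config()/nicvf_rbdr_config() helpers:

	#include <stdbool.h>
	#include <stdio.h>

	static void cfg(const char *kind, int q, bool en)
	{
		printf("%s%d %s\n", kind, q, en ? "on" : "off");
	}

	static void config_data_transfer(int sq, int cq, int rbdr, int rq,
					 bool enable)
	{
		int i;

		if (enable) {
			for (i = 0; i < sq; i++)   cfg("SQ", i, true);
			for (i = 0; i < cq; i++)   cfg("CQ", i, true);
			for (i = 0; i < rbdr; i++) cfg("RBDR", i, true);
			for (i = 0; i < rq; i++)   cfg("RQ", i, true);
		} else {
			for (i = 0; i < rq; i++)   cfg("RQ", i, false);
			for (i = 0; i < rbdr; i++) cfg("RBDR", i, false);
			for (i = 0; i < sq; i++)   cfg("SQ", i, false);
			for (i = 0; i < cq; i++)   cfg("CQ", i, false);
		}
	}

	int main(void)
	{
		config_data_transfer(2, 2, 1, 2, true);
		config_data_transfer(2, 2, 1, 2, false);
		return 0;
	}
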
qs                 68 drivers/net/ethernet/chelsio/cxgb3/adapter.h 	struct sge_qset *qs;
qs                216 drivers/net/ethernet/chelsio/cxgb3/adapter.h 	struct sge_qset qs[SGE_QSETS];
qs                324 drivers/net/ethernet/chelsio/cxgb3/adapter.h void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
qs                411 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 							  adap->sge.qs[qidx].
qs                414 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 					  &adap->sge.qs[qidx]);
qs                418 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 						 &adap->sge.qs[qidx]);
qs                438 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 				 &adapter->sge.qs[i]);
qs                448 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
qs                461 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
qs                597 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		struct sge_qset *qs = &adap->sge.qs[i];
qs                599 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (qs->adap)
qs                601 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 				t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
qs                610 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		struct sge_qset *qs = &adap->sge.qs[i];
qs                612 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (qs->adap)
qs                613 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
qs                635 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (adap->sge.qs[i].adap)
qs                636 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			napi_disable(&adap->sge.qs[i].napi);
qs                643 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (adap->sge.qs[i].adap)
qs                644 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			napi_enable(&adap->sge.qs[i].napi);
qs                667 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		pi->qs = &adap->sge.qs[pi->first_qset];
qs               1173 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		struct sge_rspq *q = &adap->sge.qs[i].rspq;
qs               1279 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 						      adap->sge.qs[0].rspq.
qs               1660 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		tot += adapter->sge.qs[i].port_stats[idx];
qs               2006 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	struct sge_qset *qs;
qs               2014 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		qs = &adapter->sge.qs[i];
qs               2016 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		t3_update_qset_coalesce(qs, qsp);
qs               2219 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			struct sge_qset *qs =
qs               2220 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 				&adapter->sge.qs[t.qset_idx];
qs               2223 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			t3_update_qset_coalesce(qs, q);
qs               2623 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		struct sge_qset *qs = &adapter->sge.qs[qidx];
qs               2627 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			source = qs;
qs               2631 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		t3_intr_handler(adapter, qs->rspq.polling) (0, source);
qs               2771 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		struct sge_qset *qs = &adapter->sge.qs[0];
qs               2780 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			qs->fl[i].empty += (v & 1);
qs               2782 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 				qs++;
qs                730 drivers/net/ethernet/chelsio/cxgb3/sge.c static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
qs                732 drivers/net/ethernet/chelsio/cxgb3/sge.c 	qs->rspq.cntxt_id = id;
qs                733 drivers/net/ethernet/chelsio/cxgb3/sge.c 	qs->fl[0].cntxt_id = 2 * id;
qs                734 drivers/net/ethernet/chelsio/cxgb3/sge.c 	qs->fl[1].cntxt_id = 2 * id + 1;
qs                735 drivers/net/ethernet/chelsio/cxgb3/sge.c 	qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
qs                736 drivers/net/ethernet/chelsio/cxgb3/sge.c 	qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
qs                737 drivers/net/ethernet/chelsio/cxgb3/sge.c 	qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
qs                738 drivers/net/ethernet/chelsio/cxgb3/sge.c 	qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
qs                739 drivers/net/ethernet/chelsio/cxgb3/sge.c 	qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
qs               1249 drivers/net/ethernet/chelsio/cxgb3/sge.c 				    struct sge_qset *qs, struct sge_txq *q)
qs               1252 drivers/net/ethernet/chelsio/cxgb3/sge.c 	set_bit(TXQ_ETH, &qs->txq_stopped);
qs               1270 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sge_qset *qs;
qs               1284 drivers/net/ethernet/chelsio/cxgb3/sge.c 	qs = &pi->qs[qidx];
qs               1285 drivers/net/ethernet/chelsio/cxgb3/sge.c 	q = &qs->txq[TXQ_ETH];
qs               1294 drivers/net/ethernet/chelsio/cxgb3/sge.c 		t3_stop_tx_queue(txq, qs, q);
qs               1311 drivers/net/ethernet/chelsio/cxgb3/sge.c 		t3_stop_tx_queue(txq, qs, q);
qs               1314 drivers/net/ethernet/chelsio/cxgb3/sge.c 		    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
qs               1333 drivers/net/ethernet/chelsio/cxgb3/sge.c 		qs->port_stats[SGE_PSTAT_TX_CSUM]++;
qs               1335 drivers/net/ethernet/chelsio/cxgb3/sge.c 		qs->port_stats[SGE_PSTAT_TSO]++;
qs               1337 drivers/net/ethernet/chelsio/cxgb3/sge.c 		qs->port_stats[SGE_PSTAT_VLANINS]++;
qs               1430 drivers/net/ethernet/chelsio/cxgb3/sge.c 		struct sge_qset *qs = txq_to_qset(q, qid);
qs               1432 drivers/net/ethernet/chelsio/cxgb3/sge.c 		set_bit(qid, &qs->txq_stopped);
qs               1436 drivers/net/ethernet/chelsio/cxgb3/sge.c 		    test_and_clear_bit(qid, &qs->txq_stopped))
qs               1526 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sge_qset *qs = (struct sge_qset *)data;
qs               1527 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sge_txq *q = &qs->txq[TXQ_CTRL];
qs               1545 drivers/net/ethernet/chelsio/cxgb3/sge.c 		set_bit(TXQ_CTRL, &qs->txq_stopped);
qs               1549 drivers/net/ethernet/chelsio/cxgb3/sge.c 		    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
qs               1556 drivers/net/ethernet/chelsio/cxgb3/sge.c 	t3_write_reg(qs->adap, A_SG_KDOORBELL,
qs               1567 drivers/net/ethernet/chelsio/cxgb3/sge.c 	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
qs               1743 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sge_qset *qs = (struct sge_qset *)data;
qs               1744 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sge_txq *q = &qs->txq[TXQ_OFLD];
qs               1745 drivers/net/ethernet/chelsio/cxgb3/sge.c 	const struct port_info *pi = netdev_priv(qs->netdev);
qs               1757 drivers/net/ethernet/chelsio/cxgb3/sge.c 			set_bit(TXQ_OFLD, &qs->txq_stopped);
qs               1761 drivers/net/ethernet/chelsio/cxgb3/sge.c 			    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
qs               1835 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
qs               1838 drivers/net/ethernet/chelsio/cxgb3/sge.c 		return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
qs               1840 drivers/net/ethernet/chelsio/cxgb3/sge.c 	return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
qs               1859 drivers/net/ethernet/chelsio/cxgb3/sge.c 		struct sge_qset *qs = rspq_to_qset(q);
qs               1861 drivers/net/ethernet/chelsio/cxgb3/sge.c 		napi_schedule(&qs->napi);
qs               1897 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
qs               1898 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sge_rspq *q = &qs->rspq;
qs               1899 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct adapter *adapter = qs->adap;
qs               1984 drivers/net/ethernet/chelsio/cxgb3/sge.c static void restart_tx(struct sge_qset *qs)
qs               1986 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
qs               1987 drivers/net/ethernet/chelsio/cxgb3/sge.c 	    should_restart_tx(&qs->txq[TXQ_ETH]) &&
qs               1988 drivers/net/ethernet/chelsio/cxgb3/sge.c 	    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
qs               1989 drivers/net/ethernet/chelsio/cxgb3/sge.c 		qs->txq[TXQ_ETH].restarts++;
qs               1990 drivers/net/ethernet/chelsio/cxgb3/sge.c 		if (netif_running(qs->netdev))
qs               1991 drivers/net/ethernet/chelsio/cxgb3/sge.c 			netif_tx_wake_queue(qs->tx_q);
qs               1994 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
qs               1995 drivers/net/ethernet/chelsio/cxgb3/sge.c 	    should_restart_tx(&qs->txq[TXQ_OFLD]) &&
qs               1996 drivers/net/ethernet/chelsio/cxgb3/sge.c 	    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
qs               1997 drivers/net/ethernet/chelsio/cxgb3/sge.c 		qs->txq[TXQ_OFLD].restarts++;
qs               1998 drivers/net/ethernet/chelsio/cxgb3/sge.c 		tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
qs               2000 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
qs               2001 drivers/net/ethernet/chelsio/cxgb3/sge.c 	    should_restart_tx(&qs->txq[TXQ_CTRL]) &&
qs               2002 drivers/net/ethernet/chelsio/cxgb3/sge.c 	    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
qs               2003 drivers/net/ethernet/chelsio/cxgb3/sge.c 		qs->txq[TXQ_CTRL].restarts++;
qs               2004 drivers/net/ethernet/chelsio/cxgb3/sge.c 		tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
qs               2082 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sge_qset *qs = rspq_to_qset(rq);
qs               2090 drivers/net/ethernet/chelsio/cxgb3/sge.c 		qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
qs               2094 drivers/net/ethernet/chelsio/cxgb3/sge.c 	skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
qs               2097 drivers/net/ethernet/chelsio/cxgb3/sge.c 		qs->port_stats[SGE_PSTAT_VLANEX]++;
qs               2102 drivers/net/ethernet/chelsio/cxgb3/sge.c 			napi_gro_receive(&qs->napi, skb);
qs               2128 drivers/net/ethernet/chelsio/cxgb3/sge.c static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
qs               2132 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct port_info *pi = netdev_priv(qs->netdev);
qs               2139 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (!qs->nomem) {
qs               2140 drivers/net/ethernet/chelsio/cxgb3/sge.c 		skb = napi_get_frags(&qs->napi);
qs               2141 drivers/net/ethernet/chelsio/cxgb3/sge.c 		qs->nomem = !skb;
qs               2161 drivers/net/ethernet/chelsio/cxgb3/sge.c 			qs->nomem = 0;
qs               2170 drivers/net/ethernet/chelsio/cxgb3/sge.c 		cpl = qs->lro_va = sd->pg_chunk.va + 2;
qs               2172 drivers/net/ethernet/chelsio/cxgb3/sge.c 		if ((qs->netdev->features & NETIF_F_RXCSUM) &&
qs               2175 drivers/net/ethernet/chelsio/cxgb3/sge.c 			qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
qs               2179 drivers/net/ethernet/chelsio/cxgb3/sge.c 		cpl = qs->lro_va;
qs               2196 drivers/net/ethernet/chelsio/cxgb3/sge.c 	skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
qs               2199 drivers/net/ethernet/chelsio/cxgb3/sge.c 		qs->port_stats[SGE_PSTAT_VLANEX]++;
qs               2202 drivers/net/ethernet/chelsio/cxgb3/sge.c 	napi_gro_frags(&qs->napi);
qs               2214 drivers/net/ethernet/chelsio/cxgb3/sge.c static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
qs               2220 drivers/net/ethernet/chelsio/cxgb3/sge.c 		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
qs               2225 drivers/net/ethernet/chelsio/cxgb3/sge.c 		qs->txq[TXQ_ETH].processed += credits;
qs               2229 drivers/net/ethernet/chelsio/cxgb3/sge.c 		qs->txq[TXQ_CTRL].processed += credits;
qs               2233 drivers/net/ethernet/chelsio/cxgb3/sge.c 		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
qs               2237 drivers/net/ethernet/chelsio/cxgb3/sge.c 		qs->txq[TXQ_OFLD].processed += credits;
qs               2250 drivers/net/ethernet/chelsio/cxgb3/sge.c static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
qs               2254 drivers/net/ethernet/chelsio/cxgb3/sge.c 		struct sge_txq *txq = &qs->txq[TXQ_ETH];
qs               2265 drivers/net/ethernet/chelsio/cxgb3/sge.c 		struct sge_txq *txq = &qs->txq[TXQ_OFLD];
qs               2320 drivers/net/ethernet/chelsio/cxgb3/sge.c static int process_responses(struct adapter *adap, struct sge_qset *qs,
qs               2323 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sge_rspq *q = &qs->rspq;
qs               2334 drivers/net/ethernet/chelsio/cxgb3/sge.c 		int lro = !!(qs->netdev->features & NETIF_F_GRO);
qs               2371 drivers/net/ethernet/chelsio/cxgb3/sge.c 			fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
qs               2381 drivers/net/ethernet/chelsio/cxgb3/sge.c 					lro_add_page(adap, qs, fl,
qs               2409 drivers/net/ethernet/chelsio/cxgb3/sge.c 			handle_rsp_cntrl_info(qs, flags);
qs               2451 drivers/net/ethernet/chelsio/cxgb3/sge.c 		check_ring_db(adap, qs, sleeping);
qs               2454 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (unlikely(qs->txq_stopped != 0))
qs               2455 drivers/net/ethernet/chelsio/cxgb3/sge.c 		restart_tx(qs);
qs               2477 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
qs               2478 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct adapter *adap = qs->adap;
qs               2479 drivers/net/ethernet/chelsio/cxgb3/sge.c 	int work_done = process_responses(adap, qs, budget);
qs               2498 drivers/net/ethernet/chelsio/cxgb3/sge.c 		t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
qs               2499 drivers/net/ethernet/chelsio/cxgb3/sge.c 			     V_NEWTIMER(qs->rspq.next_holdoff) |
qs               2500 drivers/net/ethernet/chelsio/cxgb3/sge.c 			     V_NEWINDEX(qs->rspq.cidx));
qs               2527 drivers/net/ethernet/chelsio/cxgb3/sge.c static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
qs               2530 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sge_rspq *q = &qs->rspq;
qs               2546 drivers/net/ethernet/chelsio/cxgb3/sge.c 			handle_rsp_cntrl_info(qs, flags);
qs               2560 drivers/net/ethernet/chelsio/cxgb3/sge.c 		check_ring_db(adap, qs, sleeping);
qs               2563 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (unlikely(qs->txq_stopped != 0))
qs               2564 drivers/net/ethernet/chelsio/cxgb3/sge.c 		restart_tx(qs);
qs               2586 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sge_qset *qs = rspq_to_qset(q);
qs               2592 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
qs               2597 drivers/net/ethernet/chelsio/cxgb3/sge.c 	napi_schedule(&qs->napi);
qs               2607 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sge_qset *qs = cookie;
qs               2608 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct adapter *adap = qs->adap;
qs               2609 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sge_rspq *q = &qs->rspq;
qs               2612 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (process_responses(adap, qs, -1) == 0)
qs               2626 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sge_qset *qs = cookie;
qs               2627 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sge_rspq *q = &qs->rspq;
qs               2631 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (handle_responses(qs->adap, q) < 0)
qs               2647 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sge_rspq *q = &adap->sge.qs[0].rspq;
qs               2651 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (process_responses(adap, &adap->sge.qs[0], -1)) {
qs               2658 drivers/net/ethernet/chelsio/cxgb3/sge.c 	    process_responses(adap, &adap->sge.qs[1], -1)) {
qs               2659 drivers/net/ethernet/chelsio/cxgb3/sge.c 		struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
qs               2674 drivers/net/ethernet/chelsio/cxgb3/sge.c static int rspq_check_napi(struct sge_qset *qs)
qs               2676 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sge_rspq *q = &qs->rspq;
qs               2678 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (!napi_is_scheduled(&qs->napi) &&
qs               2680 drivers/net/ethernet/chelsio/cxgb3/sge.c 		napi_schedule(&qs->napi);
qs               2697 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sge_rspq *q = &adap->sge.qs[0].rspq;
qs               2701 drivers/net/ethernet/chelsio/cxgb3/sge.c 	new_packets = rspq_check_napi(&adap->sge.qs[0]);
qs               2703 drivers/net/ethernet/chelsio/cxgb3/sge.c 		new_packets += rspq_check_napi(&adap->sge.qs[1]);
qs               2735 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
qs               2736 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
qs               2773 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
qs               2790 drivers/net/ethernet/chelsio/cxgb3/sge.c 		process_responses_gts(adap, &adap->sge.qs[1].rspq);
qs               2807 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sge_qset *qs0 = &adap->sge.qs[0];
qs               2825 drivers/net/ethernet/chelsio/cxgb3/sge.c 		napi_schedule(&adap->sge.qs[1].napi);
qs               2923 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sge_qset *qs = from_timer(qs, t, tx_reclaim_timer);
qs               2924 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct port_info *pi = netdev_priv(qs->netdev);
qs               2929 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (__netif_tx_trylock(qs->tx_q)) {
qs               2930 drivers/net/ethernet/chelsio/cxgb3/sge.c                 tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
qs               2932 drivers/net/ethernet/chelsio/cxgb3/sge.c 		__netif_tx_unlock(qs->tx_q);
qs               2935 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
qs               2936 drivers/net/ethernet/chelsio/cxgb3/sge.c 		tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
qs               2938 drivers/net/ethernet/chelsio/cxgb3/sge.c 		spin_unlock(&qs->txq[TXQ_OFLD].lock);
qs               2944 drivers/net/ethernet/chelsio/cxgb3/sge.c 	mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
qs               2964 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sge_qset *qs = from_timer(qs, t, rx_reclaim_timer);
qs               2965 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct port_info *pi = netdev_priv(qs->netdev);
qs               2970 drivers/net/ethernet/chelsio/cxgb3/sge.c 	       &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;
qs               2975 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (napi_is_scheduled(&qs->napi))
qs               2981 drivers/net/ethernet/chelsio/cxgb3/sge.c 		if (status & (1 << qs->rspq.cntxt_id)) {
qs               2982 drivers/net/ethernet/chelsio/cxgb3/sge.c 			qs->rspq.starved++;
qs               2983 drivers/net/ethernet/chelsio/cxgb3/sge.c 			if (qs->rspq.credits) {
qs               2984 drivers/net/ethernet/chelsio/cxgb3/sge.c 				qs->rspq.credits--;
qs               2985 drivers/net/ethernet/chelsio/cxgb3/sge.c 				refill_rspq(adap, &qs->rspq, 1);
qs               2986 drivers/net/ethernet/chelsio/cxgb3/sge.c 				qs->rspq.restarted++;
qs               2988 drivers/net/ethernet/chelsio/cxgb3/sge.c 					     1 << qs->rspq.cntxt_id);
qs               2993 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (qs->fl[0].credits < qs->fl[0].size)
qs               2994 drivers/net/ethernet/chelsio/cxgb3/sge.c 		__refill_fl(adap, &qs->fl[0]);
qs               2995 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (qs->fl[1].credits < qs->fl[1].size)
qs               2996 drivers/net/ethernet/chelsio/cxgb3/sge.c 		__refill_fl(adap, &qs->fl[1]);
qs               3001 drivers/net/ethernet/chelsio/cxgb3/sge.c 	mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
qs               3012 drivers/net/ethernet/chelsio/cxgb3/sge.c void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
qs               3014 drivers/net/ethernet/chelsio/cxgb3/sge.c 	qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
qs               3015 drivers/net/ethernet/chelsio/cxgb3/sge.c 	qs->rspq.polling = p->polling;
qs               3016 drivers/net/ethernet/chelsio/cxgb3/sge.c 	qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
qs               3041 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sge_qset *q = &adapter->sge.qs[id];
qs               3215 drivers/net/ethernet/chelsio/cxgb3/sge.c 		struct sge_qset *q = &adap->sge.qs[i];
qs               3238 drivers/net/ethernet/chelsio/cxgb3/sge.c 		struct sge_qset *q = &adap->sge.qs[i];
qs               3258 drivers/net/ethernet/chelsio/cxgb3/sge.c 		t3_free_qset(adap, &adap->sge.qs[i]);
qs               3293 drivers/net/ethernet/chelsio/cxgb3/sge.c 			struct sge_qset *qs = &adap->sge.qs[i];
qs               3295 drivers/net/ethernet/chelsio/cxgb3/sge.c 			tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
qs               3296 drivers/net/ethernet/chelsio/cxgb3/sge.c 			tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
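
For cxgb3, init_qset_cntxt() above shows how each qset id fans out deterministically into hardware context ids: one response queue, two free lists, and per-type tx queues offset from firmware base constants. A userspace model of that mapping; the base values are invented and the token fields are omitted:

	#include <stdio.h>

	#define FW_TUNNEL_SGEEC_START 0x400	/* illustrative bases, not the */
	#define FW_OFLD_SGEEC_START   0x500	/* driver's real constants     */
	#define FW_CTRL_SGEEC_START   0x600

	struct qset_ids {
		unsigned rspq, fl[2];
		unsigned txq_eth, txq_ofld, txq_ctrl;
	};

	static void init_qset_cntxt(struct qset_ids *qs, unsigned id)
	{
		qs->rspq = id;
		qs->fl[0] = 2 * id;		/* each qset owns two free lists */
		qs->fl[1] = 2 * id + 1;
		qs->txq_eth = FW_TUNNEL_SGEEC_START + id;
		qs->txq_ofld = FW_OFLD_SGEEC_START + id;
		qs->txq_ctrl = FW_CTRL_SGEEC_START + id;
	}

	int main(void)
	{
		struct qset_ids qs;

		init_qset_cntxt(&qs, 3);
		printf("rspq=%u fl=%u/%u eth=%#x\n",
		       qs.rspq, qs.fl[0], qs.fl[1], qs.txq_eth);
		return 0;
	}
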
qs                369 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		int qs, msi;
qs                371 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
qs                373 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 				 "%s-%d", dev->name, qs);
qs                636 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		int qs;
qs                638 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
qs                646 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 					     netdev_get_tx_queue(dev, qs),
qs                651 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 			rxq->rspq.idx = qs;
qs                667 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		int qs;
qs                669 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
qs                709 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		int qs, err;
qs                711 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		for (qs = 0; qs < pi->nqsets; qs++)
qs                712 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 			rss[qs] = rxq[qs].rspq.abs_id;
qs               1625 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	int qs;
qs               1639 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) {
qs               1640 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID;
qs               1641 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		s->ethrxq[qs].rspq.size = rp->rx_mini_pending;
qs               1642 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		s->ethtxq[qs].q.size = rp->tx_pending;
qs               1806 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	int qs;
qs               1809 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
qs               2057 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	int qs, r = (uintptr_t)v - 1;
qs               2065 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 			for (qs = 0; qs < n; ++qs) \
qs               2070 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	#define T(s, v)		S3("u", s, txq[qs].v)
qs               2071 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	#define R(s, v)		S3("u", s, rxq[qs].v)
qs               2080 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		  (rxq[qs].rspq.netdev
qs               2081 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		   ? rxq[qs].rspq.netdev->name
qs               2084 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		   (rxq[qs].rspq.netdev
qs               2086 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		       netdev_priv(rxq[qs].rspq.netdev))->port_id
qs               2096 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq));
qs               2098 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		   adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]);
qs               2210 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	int qs, r = (uintptr_t)v - 1;
qs               2218 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 			for (qs = 0; qs < n; ++qs) \
qs               2224 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	#define T3(fmt, s, v)	S3(fmt, s, txq[qs].v)
qs               2227 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	#define R3(fmt, s, v)	S3(fmt, s, rxq[qs].v)
qs               2237 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		  (rxq[qs].rspq.netdev
qs               2238 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		   ? rxq[qs].rspq.netdev->name
qs               2734 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	int q10g, n10g, qidx, pidx, qs;
qs               2791 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	for (qs = 0; qs < s->max_ethqsets; qs++) {
qs               2792 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		struct sge_eth_rxq *rxq = &s->ethrxq[qs];
qs               2793 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		struct sge_eth_txq *txq = &s->ethtxq[qs];
qs               2570 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	int qs;
qs               2572 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) {
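
In cxgb4vf, `qs` is mostly a plain loop index over a port's slice of the adapter-wide queue-set arrays: each port owns the window [first_qset, first_qset + nqsets). A sketch of the set_ringparam-style loop over that window, with simplified types and illustrative sizes:

	#include <stdio.h>

	#define MAX_ETHQSETS 16
	#define MIN_FL_RESID 8		/* illustrative free-list reserve */

	struct eth_rxq { unsigned fl_size, rspq_size; };
	struct eth_txq { unsigned q_size; };

	struct sge_model {
		struct eth_rxq ethrxq[MAX_ETHQSETS];
		struct eth_txq ethtxq[MAX_ETHQSETS];
	};

	struct port_model { unsigned first_qset, nqsets; };

	static void set_rings(struct sge_model *s, const struct port_model *pi,
			      unsigned rx, unsigned rx_mini, unsigned tx)
	{
		for (unsigned qs = pi->first_qset;
		     qs < pi->first_qset + pi->nqsets; qs++) {
			s->ethrxq[qs].fl_size = rx + MIN_FL_RESID;
			s->ethrxq[qs].rspq_size = rx_mini;
			s->ethtxq[qs].q_size = tx;
		}
	}

	int main(void)
	{
		struct sge_model s = { 0 };
		struct port_model pi = { .first_qset = 4, .nqsets = 2 };

		set_rings(&s, &pi, 1024, 128, 512);
		printf("qset 4 fl=%u\n", s.ethrxq[4].fl_size);
		return 0;
	}
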
qs                297 drivers/net/ethernet/hisilicon/hns/hnae.c 		hnae_fini_queue(handle->qs[i]);
qs                303 drivers/net/ethernet/hisilicon/hns/hnae.c 		ret = hnae_init_queue(handle, handle->qs[i], handle->dev);
qs                310 drivers/net/ethernet/hisilicon/hns/hnae.c 		hnae_fini_queue(handle->qs[j]);
qs                349 drivers/net/ethernet/hisilicon/hns/hnae.c 		ret = hnae_init_queue(handle, handle->qs[i], dev);
qs                362 drivers/net/ethernet/hisilicon/hns/hnae.c 		hnae_fini_queue(handle->qs[j]);
qs                376 drivers/net/ethernet/hisilicon/hns/hnae.c 		hnae_fini_queue(h->qs[i]);
qs                567 drivers/net/ethernet/hisilicon/hns/hnae.h 	struct hnae_queue **qs;  /* array base of all queues */
qs                671 drivers/net/ethernet/hisilicon/hns/hnae.h 		ring = &h->qs[i]->rx_ring;
qs                686 drivers/net/ethernet/hisilicon/hns/hnae.h 		ring = &h->qs[i]->rx_ring;
qs                113 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 	ae_handle->qs = (struct hnae_queue **)(&ae_handle->qs + 1);
qs                115 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		ae_handle->qs[i] = &ring_pair_cb->q;
qs                116 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		ae_handle->qs[i]->rx_ring.q = ae_handle->qs[i];
qs                117 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		ae_handle->qs[i]->tx_ring.q = ae_handle->qs[i];
qs                147 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
qs                161 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		ret = hns_rcb_wait_tx_ring_clean(handle->qs[i]);
qs                193 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		hns_rcb_ring_enable_hw(handle->qs[i], val);
qs                319 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 			q = handle->qs[i];
qs                347 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 			hns_rcb_int_clr_hw(handle->qs[k],
qs                350 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 			hns_rcbv2_int_clr_hw(handle->qs[k],
qs                366 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 	hns_rcb_wait_fbd_clean(handle->qs, handle->q_num, RCB_INT_FLAG_TX);
qs                377 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 	hns_rcb_wait_fbd_clean(handle->qs, handle->q_num, RCB_INT_FLAG_RX);
qs                544 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		container_of(handle->qs[0], struct ring_pair_cb, q);
qs                556 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		container_of(handle->qs[0], struct ring_pair_cb, q);
qs                574 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		container_of(handle->qs[0], struct ring_pair_cb, q);
qs                585 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		container_of(handle->qs[0], struct ring_pair_cb, q);
qs                663 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		queue = handle->qs[idx];
qs                742 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		hns_rcb_get_stats(handle->qs[idx], p);
qs                873 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		hns_rcb_get_ring_regs(handle->qs[i], p);
qs                 40 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
qs                 49 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 			fbd_num += dsaf_read_dev(qs[i],
qs                 52 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 			fbd_num += dsaf_read_dev(qs[i],
qs                 61 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 		dev_err(qs[i]->handle->owner_dev,
qs                 65 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs)
qs                 70 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	tail = dsaf_read_dev(&qs->tx_ring, RCB_REG_TAIL);
qs                 73 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 		head = dsaf_read_dev(&qs->tx_ring, RCB_REG_HEAD);
qs                 81 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 		dev_err(qs->dev->dev, "rcb wait timeout, head not equal to tail.\n");
qs                134 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
qs                135 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs);
qs                371 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
qs               1655 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		ring = &h->qs[i]->rx_ring;
qs               1906 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;
qs               1907 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;
qs               1908 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes;
qs               1909 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts;
qs               2007 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			i, h->qs[i]->tx_ring.next_to_clean);
qs               2009 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			i, h->qs[i]->tx_ring.next_to_use);
qs               2011 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			i, h->qs[i]->rx_ring.next_to_clean);
qs               2013 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			i, h->qs[i]->rx_ring.next_to_use);
qs               2122 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		rd->ring = &h->qs[i]->tx_ring;
qs               2134 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		rd->ring = &h->qs[i - h->q_num]->rx_ring;
qs                672 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	queue = priv->ae_handle->qs[0];
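
The hns handle keeps `qs` as the array base of all queue pointers, and the stats path above simply sums each queue's tx and rx ring counters into device totals. A simplified model of that aggregation:

	#include <stdio.h>

	struct ring_stats { unsigned long bytes, pkts; };
	struct queue_model { struct ring_stats tx_ring, rx_ring; };

	struct handle_model {
		int q_num;
		struct queue_model *qs[4];	/* array base of all queues */
	};

	static void get_stats(const struct handle_model *h,
			      unsigned long *tx_bytes, unsigned long *rx_bytes)
	{
		*tx_bytes = *rx_bytes = 0;
		for (int i = 0; i < h->q_num; i++) {
			*tx_bytes += h->qs[i]->tx_ring.bytes;
			*rx_bytes += h->qs[i]->rx_ring.bytes;
		}
	}

	int main(void)
	{
		struct queue_model q0 = { { 100, 1 }, { 200, 2 } };
		struct queue_model q1 = { { 300, 3 }, { 400, 4 } };
		struct handle_model h = { .q_num = 2, .qs = { &q0, &q1 } };
		unsigned long tx, rx;

		get_stats(&h, &tx, &rx);
		printf("tx=%lu rx=%lu\n", tx, rx);
		return 0;
	}
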
qs               9099 drivers/scsi/lpfc/lpfc_init.c lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
qs               9103 drivers/scsi/lpfc/lpfc_init.c 	if (*qs == NULL)
qs               9107 drivers/scsi/lpfc/lpfc_init.c 		__lpfc_sli4_release_queue(&(*qs)[idx]);
qs               9109 drivers/scsi/lpfc/lpfc_init.c 	kfree(*qs);
qs               9110 drivers/scsi/lpfc/lpfc_init.c 	*qs = NULL;
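
lpfc_sli4_release_queues() takes the address of a queue-pointer array (`struct lpfc_queue ***qs`) so it can free every element, free the array itself, and NULL the caller's pointer, making a repeated release harmless. The same idiom in plain C with malloc/free:

	#include <stdio.h>
	#include <stdlib.h>

	struct queue_model { int id; };

	static void release_queues(struct queue_model ***qs, int max)
	{
		if (*qs == NULL)		/* already released */
			return;

		for (int idx = 0; idx < max; idx++)
			free((*qs)[idx]);
		free(*qs);
		*qs = NULL;			/* defuse a double release */
	}

	int main(void)
	{
		int max = 3;
		struct queue_model **qs = calloc(max, sizeof(*qs));

		for (int i = 0; i < max; i++)
			qs[i] = calloc(1, sizeof(**qs));

		release_queues(&qs, max);
		release_queues(&qs, max);	/* safe no-op */
		printf("qs=%p\n", (void *)qs);
		return 0;
	}
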
qs                413 drivers/soc/qcom/socinfo.c 	struct qcom_socinfo *qs;
qs                424 drivers/soc/qcom/socinfo.c 	qs = devm_kzalloc(&pdev->dev, sizeof(*qs), GFP_KERNEL);
qs                425 drivers/soc/qcom/socinfo.c 	if (!qs)
qs                428 drivers/soc/qcom/socinfo.c 	qs->attr.family = "Snapdragon";
qs                429 drivers/soc/qcom/socinfo.c 	qs->attr.machine = socinfo_machine(&pdev->dev,
qs                431 drivers/soc/qcom/socinfo.c 	qs->attr.revision = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u.%u",
qs                435 drivers/soc/qcom/socinfo.c 		qs->attr.serial_number = devm_kasprintf(&pdev->dev, GFP_KERNEL,
qs                439 drivers/soc/qcom/socinfo.c 	qs->soc_dev = soc_device_register(&qs->attr);
qs                440 drivers/soc/qcom/socinfo.c 	if (IS_ERR(qs->soc_dev))
qs                441 drivers/soc/qcom/socinfo.c 		return PTR_ERR(qs->soc_dev);
qs                443 drivers/soc/qcom/socinfo.c 	socinfo_debugfs_init(qs, info);
qs                448 drivers/soc/qcom/socinfo.c 	platform_set_drvdata(pdev, qs->soc_dev);
qs                455 drivers/soc/qcom/socinfo.c 	struct qcom_socinfo *qs = platform_get_drvdata(pdev);
qs                457 drivers/soc/qcom/socinfo.c 	soc_device_unregister(qs->soc_dev);
qs                459 drivers/soc/qcom/socinfo.c 	socinfo_debugfs_exit(qs);
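
The qcom socinfo probe allocates its state with devm_kzalloc(), registers a SoC device, and propagates the encoded error from soc_device_register() via IS_ERR()/PTR_ERR(); remove() unregisters through the drvdata handle. A small userspace model of just the error-pointer half of that flow, mirroring the kernel's convention of encoding errno values in the top page of the address space:

	#include <stdio.h>

	#define MAX_ERRNO 4095

	static void *ERR_PTR(long error) { return (void *)error; }
	static long PTR_ERR(const void *ptr) { return (long)ptr; }
	static int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	static void *soc_device_register_model(int fail)
	{
		static int dev;		/* stand-in for the registered device */

		return fail ? ERR_PTR(-12L /* ENOMEM */) : (void *)&dev;
	}

	static long probe(int fail)
	{
		void *soc_dev = soc_device_register_model(fail);

		if (IS_ERR(soc_dev))
			return PTR_ERR(soc_dev);	/* propagate encoded error */
		return 0;
	}

	int main(void)
	{
		printf("ok=%ld fail=%ld\n", probe(0), probe(1));
		return 0;
	}
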
qs                382 drivers/staging/fieldbus/anybuss/host.c 	struct kfifo qs[3];
qs                875 drivers/staging/fieldbus/anybuss/host.c static bool qs_have_work(struct kfifo *qs, size_t num)
qs                881 drivers/staging/fieldbus/anybuss/host.c 	for (i = 0; i < num; i++, qs++) {
qs                882 drivers/staging/fieldbus/anybuss/host.c 		ret = kfifo_out_peek(qs, &t, sizeof(t));
qs                892 drivers/staging/fieldbus/anybuss/host.c 	struct kfifo *qs = cd->qs;
qs                893 drivers/staging/fieldbus/anybuss/host.c 	size_t nqs = ARRAY_SIZE(cd->qs);
qs                895 drivers/staging/fieldbus/anybuss/host.c 	for (i = 0; i < nqs; i++, qs++)
qs                896 drivers/staging/fieldbus/anybuss/host.c 		process_q(cd, qs);
qs                968 drivers/staging/fieldbus/anybuss/host.c 	struct kfifo *qs = cd->qs;
qs                969 drivers/staging/fieldbus/anybuss/host.c 	size_t nqs = ARRAY_SIZE(cd->qs);
qs                993 drivers/staging/fieldbus/anybuss/host.c 				qs_have_work(qs, nqs) ||
qs               1277 drivers/staging/fieldbus/anybuss/host.c 	for (i = 0; i < ARRAY_SIZE(cd->qs); i++) {
qs               1278 drivers/staging/fieldbus/anybuss/host.c 		ret = taskq_alloc(dev, &cd->qs[i]);
qs               1282 drivers/staging/fieldbus/anybuss/host.c 	if (WARN_ON(ARRAY_SIZE(cd->qs) < 3))
qs               1284 drivers/staging/fieldbus/anybuss/host.c 	cd->powerq = &cd->qs[0];
qs               1285 drivers/staging/fieldbus/anybuss/host.c 	cd->mboxq = &cd->qs[1];
qs               1286 drivers/staging/fieldbus/anybuss/host.c 	cd->areaq = &cd->qs[2];
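
The anybuss host keeps its three task fifos in one array so the irq thread can poll them uniformly (qs_have_work() peeks each in turn), while the named aliases powerq/mboxq/areaq keep call sites readable. A sketch with a stub fifo standing in for the kernel kfifo:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	struct fifo_model { size_t head, tail; };	/* kfifo stand-in */

	static bool fifo_peek(const struct fifo_model *f)
	{
		return f->head != f->tail;	/* something is queued */
	}

	struct controller_model {
		struct fifo_model qs[3];
		struct fifo_model *powerq, *mboxq, *areaq;
	};

	static bool qs_have_work(const struct fifo_model *qs, size_t num)
	{
		for (size_t i = 0; i < num; i++, qs++)
			if (fifo_peek(qs))
				return true;
		return false;
	}

	int main(void)
	{
		struct controller_model cd = { 0 };

		cd.powerq = &cd.qs[0];
		cd.mboxq  = &cd.qs[1];
		cd.areaq  = &cd.qs[2];

		cd.mboxq->tail = 1;	/* pretend one mailbox task is pending */
		printf("work=%d\n", qs_have_work(cd.qs, 3));
		return 0;
	}
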
qs                 96 fs/ocfs2/cluster/quorum.c 	struct o2quo_state *qs = &o2quo_state;
qs                 98 fs/ocfs2/cluster/quorum.c 	spin_lock(&qs->qs_lock);
qs                100 fs/ocfs2/cluster/quorum.c 	lowest_hb = find_first_bit(qs->qs_hb_bm, O2NM_MAX_NODES);
qs                102 fs/ocfs2/cluster/quorum.c 		lowest_reachable = test_bit(lowest_hb, qs->qs_conn_bm);
qs                105 fs/ocfs2/cluster/quorum.c 	     "lowest: %d (%sreachable)\n", qs->qs_heartbeating,
qs                106 fs/ocfs2/cluster/quorum.c 	     qs->qs_connected, lowest_hb, lowest_reachable ? "" : "un");
qs                108 fs/ocfs2/cluster/quorum.c 	if (!test_bit(o2nm_this_node(), qs->qs_hb_bm) ||
qs                109 fs/ocfs2/cluster/quorum.c 	    qs->qs_heartbeating == 1)
qs                112 fs/ocfs2/cluster/quorum.c 	if (qs->qs_heartbeating & 1) {
qs                115 fs/ocfs2/cluster/quorum.c 		quorum = (qs->qs_heartbeating + 1)/2;
qs                116 fs/ocfs2/cluster/quorum.c 		if (qs->qs_connected < quorum) {
qs                120 fs/ocfs2/cluster/quorum.c 			     qs->qs_connected, quorum,
qs                121 fs/ocfs2/cluster/quorum.c 			     qs->qs_heartbeating);
qs                129 fs/ocfs2/cluster/quorum.c 		quorum = qs->qs_heartbeating / 2;
qs                130 fs/ocfs2/cluster/quorum.c 		if (qs->qs_connected < quorum) {
qs                134 fs/ocfs2/cluster/quorum.c 			     qs->qs_connected, quorum,
qs                135 fs/ocfs2/cluster/quorum.c 			     qs->qs_heartbeating);
qs                138 fs/ocfs2/cluster/quorum.c 		else if ((qs->qs_connected == quorum) &&
qs                143 fs/ocfs2/cluster/quorum.c 			     "node %u\n", quorum, qs->qs_heartbeating,
qs                151 fs/ocfs2/cluster/quorum.c 		spin_unlock(&qs->qs_lock);
qs                156 fs/ocfs2/cluster/quorum.c 			qs->qs_heartbeating, qs->qs_connected, lowest_hb,
qs                158 fs/ocfs2/cluster/quorum.c 		spin_unlock(&qs->qs_lock);
qs                164 fs/ocfs2/cluster/quorum.c static void o2quo_set_hold(struct o2quo_state *qs, u8 node)
qs                166 fs/ocfs2/cluster/quorum.c 	assert_spin_locked(&qs->qs_lock);
qs                168 fs/ocfs2/cluster/quorum.c 	if (!test_and_set_bit(node, qs->qs_hold_bm)) {
qs                169 fs/ocfs2/cluster/quorum.c 		qs->qs_holds++;
qs                170 fs/ocfs2/cluster/quorum.c 		mlog_bug_on_msg(qs->qs_holds == O2NM_MAX_NODES,
qs                172 fs/ocfs2/cluster/quorum.c 		mlog(0, "node %u, %d total\n", node, qs->qs_holds);
qs                176 fs/ocfs2/cluster/quorum.c static void o2quo_clear_hold(struct o2quo_state *qs, u8 node)
qs                178 fs/ocfs2/cluster/quorum.c 	assert_spin_locked(&qs->qs_lock);
qs                180 fs/ocfs2/cluster/quorum.c 	if (test_and_clear_bit(node, qs->qs_hold_bm)) {
qs                181 fs/ocfs2/cluster/quorum.c 		mlog(0, "node %u, %d total\n", node, qs->qs_holds - 1);
qs                182 fs/ocfs2/cluster/quorum.c 		if (--qs->qs_holds == 0) {
qs                183 fs/ocfs2/cluster/quorum.c 			if (qs->qs_pending) {
qs                184 fs/ocfs2/cluster/quorum.c 				qs->qs_pending = 0;
qs                185 fs/ocfs2/cluster/quorum.c 				schedule_work(&qs->qs_work);
qs                188 fs/ocfs2/cluster/quorum.c 		mlog_bug_on_msg(qs->qs_holds < 0, "node %u, holds %d\n",
qs                189 fs/ocfs2/cluster/quorum.c 				node, qs->qs_holds);
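o2quo_set_hold() and o2quo_clear_hold() above implement a simple event-coalescing protocol: any event that leaves the heartbeat and connect views of a node out of sync takes at most one hold per node (the qs_hold_bm test-and-set guarantees that), and the quorum decision work is scheduled only when the final hold is released while a decision is pending. A userspace model of that counting logic, under the assumed semantics just described:

#include <stdbool.h>
#include <stdio.h>

#define MAX_NODES 255

static unsigned char hold_bm[(MAX_NODES + 7) / 8];
static int holds, pending;

static bool test_and_set(unsigned char *bm, int n)
{
	bool was = bm[n / 8] & (1u << (n % 8));

	bm[n / 8] |= 1u << (n % 8);
	return was;
}

static bool test_and_clear(unsigned char *bm, int n)
{
	bool was = bm[n / 8] & (1u << (n % 8));

	bm[n / 8] &= ~(1u << (n % 8));
	return was;
}

static void set_hold(int node)
{
	if (!test_and_set(hold_bm, node))
		holds++;		/* at most one hold per node */
}

static void clear_hold(int node)
{
	if (!test_and_clear(hold_bm, node))
		return;			/* no hold was taken for this node */
	if (--holds == 0 && pending) {
		pending = 0;		/* kernel: schedule_work(&qs->qs_work) */
		printf("last hold dropped: run the quorum decision\n");
	}
}

int main(void)
{
	set_hold(3);
	set_hold(7);
	pending = 1;			/* a decision became necessary */
	clear_hold(3);			/* still one hold outstanding */
	clear_hold(7);			/* triggers the deferred decision */
	return 0;
}

The hb_up/hb_down and conn_up/conn_err handlers that follow use exactly this: each takes a hold when the two bitmaps disagree for a node and releases it once they agree again, so transient disagreement never triggers fencing.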
qs                199 fs/ocfs2/cluster/quorum.c 	struct o2quo_state *qs = &o2quo_state;
qs                201 fs/ocfs2/cluster/quorum.c 	spin_lock(&qs->qs_lock);
qs                203 fs/ocfs2/cluster/quorum.c 	qs->qs_heartbeating++;
qs                204 fs/ocfs2/cluster/quorum.c 	mlog_bug_on_msg(qs->qs_heartbeating == O2NM_MAX_NODES,
qs                206 fs/ocfs2/cluster/quorum.c 	mlog_bug_on_msg(test_bit(node, qs->qs_hb_bm), "node %u\n", node);
qs                207 fs/ocfs2/cluster/quorum.c 	set_bit(node, qs->qs_hb_bm);
qs                209 fs/ocfs2/cluster/quorum.c 	mlog(0, "node %u, %d total\n", node, qs->qs_heartbeating);
qs                211 fs/ocfs2/cluster/quorum.c 	if (!test_bit(node, qs->qs_conn_bm))
qs                212 fs/ocfs2/cluster/quorum.c 		o2quo_set_hold(qs, node);
qs                214 fs/ocfs2/cluster/quorum.c 		o2quo_clear_hold(qs, node);
qs                216 fs/ocfs2/cluster/quorum.c 	spin_unlock(&qs->qs_lock);
qs                223 fs/ocfs2/cluster/quorum.c 	struct o2quo_state *qs = &o2quo_state;
qs                225 fs/ocfs2/cluster/quorum.c 	spin_lock(&qs->qs_lock);
qs                227 fs/ocfs2/cluster/quorum.c 	qs->qs_heartbeating--;
qs                228 fs/ocfs2/cluster/quorum.c 	mlog_bug_on_msg(qs->qs_heartbeating < 0,
qs                230 fs/ocfs2/cluster/quorum.c 			node, qs->qs_heartbeating);
qs                231 fs/ocfs2/cluster/quorum.c 	mlog_bug_on_msg(!test_bit(node, qs->qs_hb_bm), "node %u\n", node);
qs                232 fs/ocfs2/cluster/quorum.c 	clear_bit(node, qs->qs_hb_bm);
qs                234 fs/ocfs2/cluster/quorum.c 	mlog(0, "node %u, %d total\n", node, qs->qs_heartbeating);
qs                236 fs/ocfs2/cluster/quorum.c 	o2quo_clear_hold(qs, node);
qs                238 fs/ocfs2/cluster/quorum.c 	spin_unlock(&qs->qs_lock);
qs                248 fs/ocfs2/cluster/quorum.c 	struct o2quo_state *qs = &o2quo_state;
qs                250 fs/ocfs2/cluster/quorum.c 	spin_lock(&qs->qs_lock);
qs                254 fs/ocfs2/cluster/quorum.c 	qs->qs_pending = 1;
qs                255 fs/ocfs2/cluster/quorum.c 	o2quo_clear_hold(qs, node);
qs                257 fs/ocfs2/cluster/quorum.c 	spin_unlock(&qs->qs_lock);
qs                267 fs/ocfs2/cluster/quorum.c 	struct o2quo_state *qs = &o2quo_state;
qs                269 fs/ocfs2/cluster/quorum.c 	spin_lock(&qs->qs_lock);
qs                271 fs/ocfs2/cluster/quorum.c 	qs->qs_connected++;
qs                272 fs/ocfs2/cluster/quorum.c 	mlog_bug_on_msg(qs->qs_connected == O2NM_MAX_NODES,
qs                274 fs/ocfs2/cluster/quorum.c 	mlog_bug_on_msg(test_bit(node, qs->qs_conn_bm), "node %u\n", node);
qs                275 fs/ocfs2/cluster/quorum.c 	set_bit(node, qs->qs_conn_bm);
qs                277 fs/ocfs2/cluster/quorum.c 	mlog(0, "node %u, %d total\n", node, qs->qs_connected);
qs                279 fs/ocfs2/cluster/quorum.c 	if (!test_bit(node, qs->qs_hb_bm))
qs                280 fs/ocfs2/cluster/quorum.c 		o2quo_set_hold(qs, node);
qs                282 fs/ocfs2/cluster/quorum.c 		o2quo_clear_hold(qs, node);
qs                284 fs/ocfs2/cluster/quorum.c 	spin_unlock(&qs->qs_lock);
qs                293 fs/ocfs2/cluster/quorum.c 	struct o2quo_state *qs = &o2quo_state;
qs                295 fs/ocfs2/cluster/quorum.c 	spin_lock(&qs->qs_lock);
qs                297 fs/ocfs2/cluster/quorum.c 	if (test_bit(node, qs->qs_conn_bm)) {
qs                298 fs/ocfs2/cluster/quorum.c 		qs->qs_connected--;
qs                299 fs/ocfs2/cluster/quorum.c 		mlog_bug_on_msg(qs->qs_connected < 0,
qs                301 fs/ocfs2/cluster/quorum.c 				node, qs->qs_connected);
qs                303 fs/ocfs2/cluster/quorum.c 		clear_bit(node, qs->qs_conn_bm);
qs                305 fs/ocfs2/cluster/quorum.c 		if (test_bit(node, qs->qs_hb_bm))
qs                306 fs/ocfs2/cluster/quorum.c 			o2quo_set_hold(qs, node);
qs                309 fs/ocfs2/cluster/quorum.c 	mlog(0, "node %u, %d total\n", node, qs->qs_connected);
qs                312 fs/ocfs2/cluster/quorum.c 	spin_unlock(&qs->qs_lock);
qs                317 fs/ocfs2/cluster/quorum.c 	struct o2quo_state *qs = &o2quo_state;
qs                319 fs/ocfs2/cluster/quorum.c 	spin_lock_init(&qs->qs_lock);
qs                320 fs/ocfs2/cluster/quorum.c 	INIT_WORK(&qs->qs_work, o2quo_make_decision);
qs                325 fs/ocfs2/cluster/quorum.c 	struct o2quo_state *qs = &o2quo_state;
qs                327 fs/ocfs2/cluster/quorum.c 	flush_work(&qs->qs_work);
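The init/exit pair above closes the loop on that deferral: INIT_WORK() binds o2quo_make_decision() to qs_work once at startup, and flush_work() on exit waits for any already-scheduled decision to finish before the module's state goes away, so the worker can never run against freed quorum state.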
qs                 46 fs/qnx4/inode.c 	struct qnx4_sb_info *qs;
qs                 49 fs/qnx4/inode.c 	qs = qnx4_sb(sb);
qs                 50 fs/qnx4/inode.c 	qs->Version = QNX4_VERSION;
qs                192 fs/qnx4/inode.c 	struct qnx4_sb_info *qs;
qs                194 fs/qnx4/inode.c 	qs = kzalloc(sizeof(struct qnx4_sb_info), GFP_KERNEL);
qs                195 fs/qnx4/inode.c 	if (!qs)
qs                197 fs/qnx4/inode.c 	s->s_fs_info = qs;
qs                241 fs/qnx4/inode.c 	struct qnx4_sb_info *qs = qnx4_sb(sb);
qs                243 fs/qnx4/inode.c 	if (qs) {
qs                244 fs/qnx4/inode.c 		kfree(qs->BitMap);
qs                245 fs/qnx4/inode.c 		kfree(qs);
qs                305 fs/qnx6/inode.c 	struct qnx6_sb_info *qs;
qs                310 fs/qnx6/inode.c 	qs = kzalloc(sizeof(struct qnx6_sb_info), GFP_KERNEL);
qs                311 fs/qnx6/inode.c 	if (!qs)
qs                313 fs/qnx6/inode.c 	s->s_fs_info = qs;
qs                480 fs/qnx6/inode.c 	kfree(qs);
qs                487 fs/qnx6/inode.c 	struct qnx6_sb_info *qs = QNX6_SB(sb);
qs                488 fs/qnx6/inode.c 	brelse(qs->sb_buf);
qs                489 fs/qnx6/inode.c 	iput(qs->longfile);
qs                490 fs/qnx6/inode.c 	iput(qs->inodes);
qs                491 fs/qnx6/inode.c 	kfree(qs);
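Both QNX filesystems above follow the standard s_fs_info lifecycle: kzalloc() the private info in fill_super, publish it through s->s_fs_info, and release everything it holds, in reverse order, when the superblock is put. qnx4 frees its allocation bitmap before the info block; qnx6 additionally drops its cached superblock buffer with brelse() and its internal inodes with iput() before the final kfree(). A userspace analogue of the pattern, with illustrative names:

#include <stdlib.h>

struct sb_info {			/* stands in for qnx4/qnx6_sb_info */
	char *bitmap;			/* a held resource */
};

struct super {				/* stands in for struct super_block */
	void *fs_info;
};

static int fill_super(struct super *s)
{
	struct sb_info *qs = calloc(1, sizeof(*qs));	/* kzalloc() */

	if (!qs)
		return -1;		/* -ENOMEM in the kernel */
	s->fs_info = qs;		/* publish, like s->s_fs_info */
	return 0;
}

static void put_super(struct super *s)
{
	struct sb_info *qs = s->fs_info;

	if (!qs)
		return;
	free(qs->bitmap);		/* drop held resources first... */
	free(qs);			/* ...then the info block itself */
	s->fs_info = NULL;
}

int main(void)
{
	struct super s = { 0 };

	if (fill_super(&s) == 0)
		put_super(&s);
	return 0;
}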
qs                 30 kernel/bpf/queue_stack_maps.c static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs)
qs                 32 kernel/bpf/queue_stack_maps.c 	return qs->head == qs->tail;
qs                 35 kernel/bpf/queue_stack_maps.c static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
qs                 37 kernel/bpf/queue_stack_maps.c 	u32 head = qs->head + 1;
qs                 39 kernel/bpf/queue_stack_maps.c 	if (unlikely(head >= qs->size))
qs                 42 kernel/bpf/queue_stack_maps.c 	return head == qs->tail;
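A quick check of the full test above: the buffer has size slots but deliberately stores at most size - 1 values, sacrificing one slot so that head == tail can unambiguously mean empty rather than either empty or full. That is also why the allocation excerpt at queue_stack_maps.c:75 sizes the area as sizeof(*qs) plus size copies of value_size, where size works out to the requested max_entries plus the one sentinel slot.

#include <assert.h>
#include <stdbool.h>

static bool is_full(unsigned int head, unsigned int tail, unsigned int size)
{
	unsigned int next = head + 1;

	if (next >= size)
		next = 0;		/* wrap, exactly as the kernel code */
	return next == tail;
}

int main(void)
{
	/* size = 4: capacity is 3, one slot is the empty/full sentinel */
	assert(!is_full(0, 0, 4));	/* empty */
	assert(!is_full(2, 0, 4));	/* two elements queued */
	assert(is_full(3, 0, 4));	/* three queued: next push meets tail */
	return 0;
}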
qs                 71 kernel/bpf/queue_stack_maps.c 	struct bpf_queue_stack *qs;
qs                 75 kernel/bpf/queue_stack_maps.c 	cost = queue_size = sizeof(*qs) + size * attr->value_size;
qs                 81 kernel/bpf/queue_stack_maps.c 	qs = bpf_map_area_alloc(queue_size, numa_node);
qs                 82 kernel/bpf/queue_stack_maps.c 	if (!qs) {
qs                 87 kernel/bpf/queue_stack_maps.c 	memset(qs, 0, sizeof(*qs));
qs                 89 kernel/bpf/queue_stack_maps.c 	bpf_map_init_from_attr(&qs->map, attr);
qs                 91 kernel/bpf/queue_stack_maps.c 	bpf_map_charge_move(&qs->map.memory, &mem);
qs                 92 kernel/bpf/queue_stack_maps.c 	qs->size = size;
qs                 94 kernel/bpf/queue_stack_maps.c 	raw_spin_lock_init(&qs->lock);
qs                 96 kernel/bpf/queue_stack_maps.c 	return &qs->map;
qs                102 kernel/bpf/queue_stack_maps.c 	struct bpf_queue_stack *qs = bpf_queue_stack(map);
qs                111 kernel/bpf/queue_stack_maps.c 	bpf_map_area_free(qs);
qs                116 kernel/bpf/queue_stack_maps.c 	struct bpf_queue_stack *qs = bpf_queue_stack(map);
qs                121 kernel/bpf/queue_stack_maps.c 	raw_spin_lock_irqsave(&qs->lock, flags);
qs                123 kernel/bpf/queue_stack_maps.c 	if (queue_stack_map_is_empty(qs)) {
qs                124 kernel/bpf/queue_stack_maps.c 		memset(value, 0, qs->map.value_size);
qs                129 kernel/bpf/queue_stack_maps.c 	ptr = &qs->elements[qs->tail * qs->map.value_size];
qs                130 kernel/bpf/queue_stack_maps.c 	memcpy(value, ptr, qs->map.value_size);
qs                133 kernel/bpf/queue_stack_maps.c 		if (unlikely(++qs->tail >= qs->size))
qs                134 kernel/bpf/queue_stack_maps.c 			qs->tail = 0;
qs                138 kernel/bpf/queue_stack_maps.c 	raw_spin_unlock_irqrestore(&qs->lock, flags);
qs                145 kernel/bpf/queue_stack_maps.c 	struct bpf_queue_stack *qs = bpf_queue_stack(map);
qs                151 kernel/bpf/queue_stack_maps.c 	raw_spin_lock_irqsave(&qs->lock, flags);
qs                153 kernel/bpf/queue_stack_maps.c 	if (queue_stack_map_is_empty(qs)) {
qs                154 kernel/bpf/queue_stack_maps.c 		memset(value, 0, qs->map.value_size);
qs                159 kernel/bpf/queue_stack_maps.c 	index = qs->head - 1;
qs                160 kernel/bpf/queue_stack_maps.c 	if (unlikely(index >= qs->size))
qs                161 kernel/bpf/queue_stack_maps.c 		index = qs->size - 1;
qs                163 kernel/bpf/queue_stack_maps.c 	ptr = &qs->elements[index * qs->map.value_size];
qs                164 kernel/bpf/queue_stack_maps.c 	memcpy(value, ptr, qs->map.value_size);
qs                167 kernel/bpf/queue_stack_maps.c 		qs->head = index;
qs                170 kernel/bpf/queue_stack_maps.c 	raw_spin_unlock_irqrestore(&qs->lock, flags);
qs                202 kernel/bpf/queue_stack_maps.c 	struct bpf_queue_stack *qs = bpf_queue_stack(map);
qs                216 kernel/bpf/queue_stack_maps.c 	raw_spin_lock_irqsave(&qs->lock, irq_flags);
qs                218 kernel/bpf/queue_stack_maps.c 	if (queue_stack_map_is_full(qs)) {
qs                224 kernel/bpf/queue_stack_maps.c 		if (unlikely(++qs->tail >= qs->size))
qs                225 kernel/bpf/queue_stack_maps.c 			qs->tail = 0;
qs                228 kernel/bpf/queue_stack_maps.c 	dst = &qs->elements[qs->head * qs->map.value_size];
qs                229 kernel/bpf/queue_stack_maps.c 	memcpy(dst, value, qs->map.value_size);
qs                231 kernel/bpf/queue_stack_maps.c 	if (unlikely(++qs->head >= qs->size))
qs                232 kernel/bpf/queue_stack_maps.c 		qs->head = 0;
qs                235 kernel/bpf/queue_stack_maps.c 	raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
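Put together, the excerpts above form one circular buffer with two personalities: a queue pops the oldest element by advancing tail, a stack pops the newest by stepping head back (the unsigned index >= size test doubles as the wrap for head == 0), and both share a push that, under the BPF_EXIST flag, advances tail to overwrite the oldest element instead of failing when full. A self-contained model, using fixed u64 values instead of variable-sized elements and a force parameter standing in for BPF_EXIST:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SIZE 4u				/* slots, including one sentinel */

static uint64_t elems[SIZE];
static uint32_t head, tail;

static bool empty(void)
{
	return head == tail;
}

static bool full(void)
{
	uint32_t n = head + 1;

	if (n >= SIZE)
		n = 0;
	return n == tail;
}

static bool push(uint64_t v, bool force)
{
	if (full()) {
		if (!force)
			return false;	/* -E2BIG without BPF_EXIST */
		if (++tail >= SIZE)	/* overwrite mode: drop the oldest */
			tail = 0;
	}
	elems[head] = v;
	if (++head >= SIZE)
		head = 0;
	return true;
}

static bool pop_queue(uint64_t *v)	/* FIFO: take the oldest */
{
	if (empty())
		return false;
	*v = elems[tail];
	if (++tail >= SIZE)
		tail = 0;
	return true;
}

static bool pop_stack(uint64_t *v)	/* LIFO: take the newest */
{
	uint32_t idx;

	if (empty())
		return false;
	idx = head - 1;
	if (idx >= SIZE)		/* head was 0: unsigned wrap */
		idx = SIZE - 1;
	*v = elems[idx];
	head = idx;
	return true;
}

int main(void)
{
	uint64_t v;

	push(1, false);
	push(2, false);
	push(3, false);		/* full: 3 values in 4 slots */
	push(4, true);		/* BPF_EXIST-style force: overwrites 1 */
	pop_queue(&v);
	printf("queue pop (oldest): %llu\n", (unsigned long long)v); /* 2 */
	pop_stack(&v);
	printf("stack pop (newest): %llu\n", (unsigned long long)v); /* 4 */
	return 0;
}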
qs               2950 net/sched/sch_cake.c 	struct gnet_stats_queue qs = { 0 };
qs               2965 net/sched/sch_cake.c 				qs.qlen++;
qs               2970 net/sched/sch_cake.c 		qs.backlog = b->backlogs[idx % CAKE_QUEUES];
qs               2971 net/sched/sch_cake.c 		qs.drops = flow->dropped;
qs               2973 net/sched/sch_cake.c 	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
qs                620 net/sched/sch_fq_codel.c 	struct gnet_stats_queue qs = { 0 };
qs                647 net/sched/sch_fq_codel.c 				qs.qlen++;
qs                652 net/sched/sch_fq_codel.c 		qs.backlog = q->backlogs[idx];
qs                653 net/sched/sch_fq_codel.c 		qs.drops = 0;
qs                655 net/sched/sch_fq_codel.c 	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
qs               1124 net/sched/sch_htb.c 	struct gnet_stats_queue qs = {
qs               1131 net/sched/sch_htb.c 		qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);
qs               1141 net/sched/sch_htb.c 	    gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0)
qs                867 net/sched/sch_sfq.c 	struct gnet_stats_queue qs = { 0 };
qs                874 net/sched/sch_sfq.c 		qs.qlen = slot->qlen;
qs                875 net/sched/sch_sfq.c 		qs.backlog = slot->backlog;
qs                877 net/sched/sch_sfq.c 	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
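The four qdisc excerpts above (cake, fq_codel, htb, sfq) share one dump idiom: zero a gnet_stats_queue on the stack, fill in qlen (walking the class's queued packets where the qdisc does not track it directly), backlog, and drops for the one class being dumped, then hand the struct to gnet_stats_copy_queue(). A minimal model of that flow; copy_queue() below is an illustrative stand-in, not the kernel helper:

#include <stdio.h>
#include <string.h>

struct queue_stats { unsigned int qlen, backlog, drops; };

struct pkt { unsigned int len; struct pkt *next; };

static int copy_queue(const struct queue_stats *qs, unsigned int qlen)
{
	printf("qlen=%u backlog=%u drops=%u\n",
	       qlen, qs->backlog, qs->drops);
	return 0;
}

static int dump_class(const struct pkt *q, unsigned int backlog,
		      unsigned int drops)
{
	struct queue_stats qs;

	memset(&qs, 0, sizeof(qs));	/* the "= { 0 }" in the excerpts */
	for (; q; q = q->next)
		qs.qlen++;		/* count packets queued to the class */
	qs.backlog = backlog;
	qs.drops = drops;
	return copy_queue(&qs, qs.qlen);
}

int main(void)
{
	struct pkt c = { 300, NULL }, b = { 200, &c }, a = { 100, &b };

	return dump_class(&a, 600, 0) < 0;
}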
qs               6208 sound/pci/rme9652/hdspm.c 			levels->speed = qs;