nq               1156 arch/s390/include/asm/pgtable.h 			  unsigned char key, bool nq);
nq               1159 arch/s390/include/asm/pgtable.h 			       bool nq, bool mr, bool mc);
nq               1009 arch/s390/kvm/priv.c 	bool mr = false, mc = false, nq;
nq               1039 arch/s390/kvm/priv.c 	nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;
nq               1091 arch/s390/kvm/priv.c 							key, NULL, nq, mr, mc);
nq                755 arch/s390/mm/pgtable.c 			  unsigned char key, bool nq)
nq                804 arch/s390/mm/pgtable.c 		page_set_storage_key(paddr, skey, !nq);
nq                828 arch/s390/mm/pgtable.c 			       bool nq, bool mr, bool mc)
nq                847 arch/s390/mm/pgtable.c 	rc = set_guest_storage_key(current->mm, addr, key, nq);
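
In the s390 hits above, `nq` is the "no quiesce" bit carried by the PFMF/SSKE instructions: handle_pfmf() in kvm/priv.c pulls it out of the guest register (PFMF_NQ), threads it through set_guest_storage_key(), and pgtable.c finally passes `!nq` to page_set_storage_key(), so a caller that sets `nq` gets the nonquiescing form of the key update. A minimal kernel-context sketch of that flow; resolve_to_phys() is a hypothetical stand-in for the real page-table walk:

    /* Sketch only: how the no-quiesce flag reaches the storage-key update.
     * The real set_guest_storage_key() walks the page tables and merges
     * change/reference state; here only the nq plumbing is shown. */
    int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
                              unsigned char key, bool nq)
    {
            unsigned long paddr = resolve_to_phys(mm, addr); /* hypothetical */

            page_set_storage_key(paddr, key, !nq);  /* !nq: quiesce unless asked not to */
            return 0;
    }
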
nq                 24 drivers/block/null_blk.h 	struct nullb_queue *nq;
nq                545 drivers/block/null_blk_main.c static void put_tag(struct nullb_queue *nq, unsigned int tag)
nq                547 drivers/block/null_blk_main.c 	clear_bit_unlock(tag, nq->tag_map);
nq                549 drivers/block/null_blk_main.c 	if (waitqueue_active(&nq->wait))
nq                550 drivers/block/null_blk_main.c 		wake_up(&nq->wait);
nq                553 drivers/block/null_blk_main.c static unsigned int get_tag(struct nullb_queue *nq)
nq                558 drivers/block/null_blk_main.c 		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
nq                559 drivers/block/null_blk_main.c 		if (tag >= nq->queue_depth)
nq                561 drivers/block/null_blk_main.c 	} while (test_and_set_bit_lock(tag, nq->tag_map));
nq                568 drivers/block/null_blk_main.c 	put_tag(cmd->nq, cmd->tag);
nq                573 drivers/block/null_blk_main.c static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
nq                578 drivers/block/null_blk_main.c 	tag = get_tag(nq);
nq                580 drivers/block/null_blk_main.c 		cmd = &nq->cmds[tag];
nq                583 drivers/block/null_blk_main.c 		cmd->nq = nq;
nq                584 drivers/block/null_blk_main.c 		if (nq->dev->irqmode == NULL_IRQ_TIMER) {
nq                595 drivers/block/null_blk_main.c static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
nq                600 drivers/block/null_blk_main.c 	cmd = __alloc_cmd(nq);
nq                605 drivers/block/null_blk_main.c 		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
nq                606 drivers/block/null_blk_main.c 		cmd = __alloc_cmd(nq);
nq                613 drivers/block/null_blk_main.c 	finish_wait(&nq->wait, &wait);
nq                619 drivers/block/null_blk_main.c 	int queue_mode = cmd->nq->dev->queue_mode;
nq                643 drivers/block/null_blk_main.c 	ktime_t kt = cmd->nq->dev->completion_nsec;
nq               1056 drivers/block/null_blk_main.c 	struct nullb *nullb = cmd->nq->dev->nullb;
nq               1090 drivers/block/null_blk_main.c 	struct nullb *nullb = cmd->nq->dev->nullb;
nq               1139 drivers/block/null_blk_main.c 	struct nullb_device *dev = cmd->nq->dev;
nq               1162 drivers/block/null_blk_main.c 	struct badblocks *bb = &cmd->nq->dev->badblocks;
nq               1175 drivers/block/null_blk_main.c 	struct nullb_device *dev = cmd->nq->dev;
nq               1189 drivers/block/null_blk_main.c 	switch (cmd->nq->dev->irqmode) {
nq               1191 drivers/block/null_blk_main.c 		switch (cmd->nq->dev->queue_mode) {
nq               1215 drivers/block/null_blk_main.c 	struct nullb_device *dev = cmd->nq->dev;
nq               1289 drivers/block/null_blk_main.c 	struct nullb_queue *nq = nullb_to_queue(nullb);
nq               1292 drivers/block/null_blk_main.c 	cmd = alloc_cmd(nq, 1);
nq               1328 drivers/block/null_blk_main.c 	struct nullb_queue *nq = hctx->driver_data;
nq               1334 drivers/block/null_blk_main.c 	if (nq->dev->irqmode == NULL_IRQ_TIMER) {
nq               1340 drivers/block/null_blk_main.c 	cmd->nq = nq;
nq               1349 drivers/block/null_blk_main.c 		nq->requeue_selection++;
nq               1350 drivers/block/null_blk_main.c 		if (nq->requeue_selection & 1)
nq               1369 drivers/block/null_blk_main.c static void cleanup_queue(struct nullb_queue *nq)
nq               1371 drivers/block/null_blk_main.c 	kfree(nq->tag_map);
nq               1372 drivers/block/null_blk_main.c 	kfree(nq->cmds);
nq               1444 drivers/block/null_blk_main.c static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
nq               1447 drivers/block/null_blk_main.c 	BUG_ON(!nq);
nq               1449 drivers/block/null_blk_main.c 	init_waitqueue_head(&nq->wait);
nq               1450 drivers/block/null_blk_main.c 	nq->queue_depth = nullb->queue_depth;
nq               1451 drivers/block/null_blk_main.c 	nq->dev = nullb->dev;
nq               1458 drivers/block/null_blk_main.c 	struct nullb_queue *nq;
nq               1464 drivers/block/null_blk_main.c 		nq = &nullb->queues[i];
nq               1465 drivers/block/null_blk_main.c 		hctx->driver_data = nq;
nq               1466 drivers/block/null_blk_main.c 		null_init_queue(nullb, nq);
nq               1471 drivers/block/null_blk_main.c static int setup_commands(struct nullb_queue *nq)
nq               1476 drivers/block/null_blk_main.c 	nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL);
nq               1477 drivers/block/null_blk_main.c 	if (!nq->cmds)
nq               1480 drivers/block/null_blk_main.c 	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
nq               1481 drivers/block/null_blk_main.c 	nq->tag_map = kcalloc(tag_size, sizeof(unsigned long), GFP_KERNEL);
nq               1482 drivers/block/null_blk_main.c 	if (!nq->tag_map) {
nq               1483 drivers/block/null_blk_main.c 		kfree(nq->cmds);
nq               1487 drivers/block/null_blk_main.c 	for (i = 0; i < nq->queue_depth; i++) {
nq               1488 drivers/block/null_blk_main.c 		cmd = &nq->cmds[i];
nq               1512 drivers/block/null_blk_main.c 	struct nullb_queue *nq;
nq               1516 drivers/block/null_blk_main.c 		nq = &nullb->queues[i];
nq               1518 drivers/block/null_blk_main.c 		null_init_queue(nullb, nq);
nq               1520 drivers/block/null_blk_main.c 		ret = setup_commands(nq);
nq                 94 drivers/block/null_blk_zoned.c 	struct nullb_device *dev = cmd->nq->dev;
nq                127 drivers/block/null_blk_zoned.c 	struct nullb_device *dev = cmd->nq->dev;
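
Throughout the null_blk hits, `nq` is a `struct nullb_queue *`: the per-hardware-queue context that every `nullb_cmd` points back to (`cmd->nq`), which is why so many paths reach the device as `cmd->nq->dev`. In bio mode the queue also owns a tag bitmap; the get_tag()/put_tag() pair is a lock-free allocator, reconstructed here from the listed fragments with the waiter handshake called out in comments:

    static void put_tag(struct nullb_queue *nq, unsigned int tag)
    {
            clear_bit_unlock(tag, nq->tag_map);

            /* A freed tag may unblock a sleeper in alloc_cmd(). */
            if (waitqueue_active(&nq->wait))
                    wake_up(&nq->wait);
    }

    static unsigned int get_tag(struct nullb_queue *nq)
    {
            unsigned int tag;

            do {
                    tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
                    if (tag >= nq->queue_depth)
                            return -1U;     /* full; caller may sleep and retry */
            } while (test_and_set_bit_lock(tag, nq->tag_map));

            return tag;
    }
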
nq                146 drivers/infiniband/hw/bnxt_re/bnxt_re.h 	struct bnxt_qplib_nq		nq[BNXT_RE_MAX_MSIX];
nq               1297 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	struct bnxt_qplib_nq *nq = NULL;
nq               1300 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		nq = qplib_srq->cq->nq;
nq               1304 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	if (nq)
nq               1305 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		nq->budget--;
nq               1349 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	struct bnxt_qplib_nq *nq = NULL;
nq               1377 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
nq               1378 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	nq = &rdev->nq[0];
nq               1404 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	if (nq)
nq               1405 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		nq->budget++;
nq               2518 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	struct bnxt_qplib_nq *nq;
nq               2523 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	nq = cq->qplib_cq.nq;
nq               2529 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	nq->budget--;
nq               2541 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	struct bnxt_qplib_nq *nq = NULL;
nq               2593 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
nq               2595 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
nq               2596 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	cq->qplib_cq.nq	= nq;
nq               2606 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	nq->budget++;
nq                215 drivers/infiniband/hw/bnxt_re/main.c 	struct bnxt_qplib_nq *nq;
nq                219 drivers/infiniband/hw/bnxt_re/main.c 		nq = &rdev->nq[indx - 1];
nq                220 drivers/infiniband/hw/bnxt_re/main.c 		bnxt_qplib_nq_stop_irq(nq, false);
nq                231 drivers/infiniband/hw/bnxt_re/main.c 	struct bnxt_qplib_nq *nq;
nq                253 drivers/infiniband/hw/bnxt_re/main.c 		nq = &rdev->nq[indx - 1];
nq                254 drivers/infiniband/hw/bnxt_re/main.c 		rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
nq                850 drivers/infiniband/hw/bnxt_re/main.c static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
nq                880 drivers/infiniband/hw/bnxt_re/main.c static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
nq                910 drivers/infiniband/hw/bnxt_re/main.c 		bnxt_qplib_disable_nq(&rdev->nq[i - 1]);
nq                926 drivers/infiniband/hw/bnxt_re/main.c 		rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
nq                940 drivers/infiniband/hw/bnxt_re/main.c 		bnxt_qplib_disable_nq(&rdev->nq[i]);
nq                951 drivers/infiniband/hw/bnxt_re/main.c 		bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
nq                952 drivers/infiniband/hw/bnxt_re/main.c 		rdev->nq[i].res = NULL;
nq                953 drivers/infiniband/hw/bnxt_re/main.c 		bnxt_qplib_free_nq(&rdev->nq[i]);
nq                999 drivers/infiniband/hw/bnxt_re/main.c 		rdev->nq[i].res = &rdev->qplib_res;
nq               1000 drivers/infiniband/hw/bnxt_re/main.c 		rdev->nq[i].hwq.max_elements = BNXT_RE_MAX_CQ_COUNT +
nq               1002 drivers/infiniband/hw/bnxt_re/main.c 		rc = bnxt_qplib_alloc_nq(rdev->en_dev->pdev, &rdev->nq[i]);
nq               1009 drivers/infiniband/hw/bnxt_re/main.c 		pg_map = rdev->nq[i].hwq.pbl[PBL_LVL_0].pg_map_arr;
nq               1010 drivers/infiniband/hw/bnxt_re/main.c 		pages = rdev->nq[i].hwq.pbl[rdev->nq[i].hwq.level].pg_count;
nq               1014 drivers/infiniband/hw/bnxt_re/main.c 					    &rdev->nq[i].ring_id);
nq               1019 drivers/infiniband/hw/bnxt_re/main.c 			bnxt_qplib_free_nq(&rdev->nq[i]);
nq               1028 drivers/infiniband/hw/bnxt_re/main.c 		bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
nq               1029 drivers/infiniband/hw/bnxt_re/main.c 		bnxt_qplib_free_nq(&rdev->nq[i]);
nq                159 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct bnxt_qplib_nq *nq = nq_work->nq;
nq                161 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	if (cq && nq) {
nq                163 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
nq                164 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			dev_dbg(&nq->pdev->dev,
nq                166 drivers/infiniband/hw/bnxt_re/qplib_fp.c 				__func__, cq, nq);
nq                167 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			nq->cqn_handler(nq, cq);
nq                238 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data;
nq                239 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct bnxt_qplib_hwq *hwq = &nq->hwq;
nq                246 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	int budget = nq->budget;
nq                248 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	bool gen_p5 = bnxt_qplib_is_chip_gen_p5(nq->res->cctx);
nq                278 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			if (!nq->cqn_handler(nq, (cq)))
nq                281 drivers/infiniband/hw/bnxt_re/qplib_fp.c 				dev_warn(&nq->pdev->dev,
nq                296 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			if (!nq->srqn_handler(nq,
nq                301 drivers/infiniband/hw/bnxt_re/qplib_fp.c 				dev_warn(&nq->pdev->dev,
nq                309 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			dev_warn(&nq->pdev->dev,
nq                317 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		bnxt_qplib_ring_nq_db_rearm(nq->bar_reg_iomem, hwq->cons,
nq                318 drivers/infiniband/hw/bnxt_re/qplib_fp.c 					    hwq->max_elements, nq->ring_id,
nq                325 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct bnxt_qplib_nq *nq = dev_instance;
nq                326 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct bnxt_qplib_hwq *hwq = &nq->hwq;
nq                332 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	nq_ptr = (struct nq_base **)nq->hwq.pbl_ptr;
nq                336 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	tasklet_schedule(&nq->worker);
nq                341 drivers/infiniband/hw/bnxt_re/qplib_fp.c void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
nq                343 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	bool gen_p5 = bnxt_qplib_is_chip_gen_p5(nq->res->cctx);
nq                344 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	tasklet_disable(&nq->worker);
nq                346 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	bnxt_qplib_ring_nq_db(nq->bar_reg_iomem, nq->hwq.cons,
nq                347 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			      nq->hwq.max_elements, nq->ring_id, gen_p5);
nq                349 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	synchronize_irq(nq->vector);
nq                351 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		tasklet_kill(&nq->worker);
nq                352 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	if (nq->requested) {
nq                353 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		irq_set_affinity_hint(nq->vector, NULL);
nq                354 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		free_irq(nq->vector, nq);
nq                355 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		nq->requested = false;
nq                359 drivers/infiniband/hw/bnxt_re/qplib_fp.c void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
nq                361 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	if (nq->cqn_wq) {
nq                362 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		destroy_workqueue(nq->cqn_wq);
nq                363 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		nq->cqn_wq = NULL;
nq                367 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	if (nq->requested)
nq                368 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		bnxt_qplib_nq_stop_irq(nq, true);
nq                370 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	if (nq->bar_reg_iomem)
nq                371 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		iounmap(nq->bar_reg_iomem);
nq                372 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	nq->bar_reg_iomem = NULL;
nq                374 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	nq->cqn_handler = NULL;
nq                375 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	nq->srqn_handler = NULL;
nq                376 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	nq->vector = 0;
nq                379 drivers/infiniband/hw/bnxt_re/qplib_fp.c int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
nq                382 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	bool gen_p5 = bnxt_qplib_is_chip_gen_p5(nq->res->cctx);
nq                385 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	if (nq->requested)
nq                388 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	nq->vector = msix_vector;
nq                390 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		tasklet_init(&nq->worker, bnxt_qplib_service_nq,
nq                391 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			     (unsigned long)nq);
nq                393 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		tasklet_enable(&nq->worker);
nq                395 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
nq                396 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
nq                400 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cpumask_clear(&nq->mask);
nq                401 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cpumask_set_cpu(nq_indx, &nq->mask);
nq                402 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	rc = irq_set_affinity_hint(nq->vector, &nq->mask);
nq                404 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		dev_warn(&nq->pdev->dev,
nq                406 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			 nq->vector, nq_indx);
nq                408 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	nq->requested = true;
nq                409 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	bnxt_qplib_ring_nq_db_rearm(nq->bar_reg_iomem, nq->hwq.cons,
nq                410 drivers/infiniband/hw/bnxt_re/qplib_fp.c 				    nq->hwq.max_elements, nq->ring_id, gen_p5);
nq                415 drivers/infiniband/hw/bnxt_re/qplib_fp.c int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
nq                417 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			 int (*cqn_handler)(struct bnxt_qplib_nq *nq,
nq                419 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			 int (*srqn_handler)(struct bnxt_qplib_nq *nq,
nq                427 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		nq->cqn_handler = cqn_handler;
nq                430 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		nq->srqn_handler = srqn_handler;
nq                433 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	nq->cqn_wq  = create_singlethread_workqueue("bnxt_qplib_nq");
nq                434 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	if (!nq->cqn_wq)
nq                437 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
nq                438 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	nq->bar_reg_off = bar_reg_offset;
nq                439 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	nq_base = pci_resource_start(pdev, nq->bar_reg);
nq                445 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	nq->bar_reg_iomem = ioremap_nocache(nq_base + nq->bar_reg_off, 8);
nq                446 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	if (!nq->bar_reg_iomem) {
nq                451 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
nq                453 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		dev_err(&nq->pdev->dev,
nq                460 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	bnxt_qplib_disable_nq(nq);
nq                464 drivers/infiniband/hw/bnxt_re/qplib_fp.c void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
nq                466 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	if (nq->hwq.max_elements) {
nq                467 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		bnxt_qplib_free_hwq(nq->pdev, &nq->hwq);
nq                468 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		nq->hwq.max_elements = 0;
nq                472 drivers/infiniband/hw/bnxt_re/qplib_fp.c int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq)
nq                476 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	nq->pdev = pdev;
nq                477 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	if (!nq->hwq.max_elements ||
nq                478 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
nq                479 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
nq                480 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	hwq_type = bnxt_qplib_get_hwq_type(nq->res);
nq                481 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	if (bnxt_qplib_alloc_init_hwq(nq->pdev, &nq->hwq, NULL,
nq                482 drivers/infiniband/hw/bnxt_re/qplib_fp.c 				      &nq->hwq.max_elements,
nq                487 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	nq->budget = 8;
nq               1795 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			nq_work->nq = qp->scq->nq;
nq               1797 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
nq               1885 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			nq_work->nq = qp->rcq->nq;
nq               1887 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
nq               2831 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	flush_workqueue(qp->scq->nq->cqn_wq);
nq               2833 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		flush_workqueue(qp->rcq->nq->cqn_wq);
nq                380 drivers/infiniband/hw/bnxt_re/qplib_fp.h 	struct bnxt_qplib_nq		*nq;
nq                489 drivers/infiniband/hw/bnxt_re/qplib_fp.h 	int			(*cqn_handler)(struct bnxt_qplib_nq *nq,
nq                491 drivers/infiniband/hw/bnxt_re/qplib_fp.h 	int			(*srqn_handler)(struct bnxt_qplib_nq *nq,
nq                500 drivers/infiniband/hw/bnxt_re/qplib_fp.h 	struct bnxt_qplib_nq    *nq;
nq                504 drivers/infiniband/hw/bnxt_re/qplib_fp.h void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill);
nq                505 drivers/infiniband/hw/bnxt_re/qplib_fp.h void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
nq                506 drivers/infiniband/hw/bnxt_re/qplib_fp.h int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
nq                508 drivers/infiniband/hw/bnxt_re/qplib_fp.h int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
nq                510 drivers/infiniband/hw/bnxt_re/qplib_fp.h 			 int (*cqn_handler)(struct bnxt_qplib_nq *nq,
nq                512 drivers/infiniband/hw/bnxt_re/qplib_fp.h 			 int (*srqn_handler)(struct bnxt_qplib_nq *nq,
nq                552 drivers/infiniband/hw/bnxt_re/qplib_fp.h void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
nq                553 drivers/infiniband/hw/bnxt_re/qplib_fp.h int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
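
In bnxt_re, `nq` is a `struct bnxt_qplib_nq`, the RoCE notification queue: the driver keeps one per MSI-X vector (`rdev->nq[BNXT_RE_MAX_MSIX]`), spreads CQs across them round-robin (the `nq_alloc_cnt % (rdev->num_msix - 1)` hit), and services each from a tasklet that dispatches NQEs to the registered `cqn_handler`/`srqn_handler` before rearming the doorbell. A condensed sketch of that service loop as I read the hits; next_valid_nqe(), nqe_type(), and the nqe_to_*()/nqe_event() decoders are hypothetical helpers standing in for the real ring walk:

    static void service_nq_sketch(unsigned long data)
    {
            struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data;
            struct bnxt_qplib_hwq *hwq = &nq->hwq;
            int budget = nq->budget;

            while (budget--) {
                    struct nq_base *nqe = next_valid_nqe(hwq);  /* hypothetical */

                    if (!nqe)
                            break;
                    switch (nqe_type(nqe)) {                    /* hypothetical */
                    case NQ_BASE_TYPE_CQ_NOTIFICATION:
                            nq->cqn_handler(nq, nqe_to_cq(nqe));
                            break;
                    case NQ_BASE_TYPE_SRQ_EVENT:
                            nq->srqn_handler(nq, nqe_to_srq(nqe), nqe_event(nqe));
                            break;
                    default:
                            dev_warn(&nq->pdev->dev, "unknown NQE type\n");
                            break;
                    }
                    hwq->cons++;
            }
            /* Re-enable the interrupt at the final consumer index. */
            bnxt_qplib_ring_nq_db_rearm(nq->bar_reg_iomem, hwq->cons,
                                        hwq->max_elements, nq->ring_id,
                                        bnxt_qplib_is_chip_gen_p5(nq->res->cctx));
    }
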
nq                 64 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
nq                 68 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		nq = 1;
nq                 70 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
nq                 71 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	return nq;
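
In bnx2x_cmn.c, `nq` is simply the queue count: the module parameter if non-zero (GNU `x ?: y` shorthand), otherwise the stack's default RSS queue count, clamped to what the hardware supports. A standalone sketch of the same sizing logic with made-up numbers:

    #include <stdio.h>

    static int clamp_int(int v, int lo, int hi)
    {
            return v < lo ? lo : (v > hi ? hi : v);
    }

    /* module_param == 0 means "not set", mirroring the ?: fallback above. */
    static int calc_num_queues(int module_param, int rss_default, int hw_max)
    {
            int nq = module_param ? module_param : rss_default;

            return clamp_int(nq, 1, hw_max);
    }

    int main(void)
    {
            printf("%d\n", calc_num_queues(0, 8, 4));  /* 4: capped by hw_max */
            return 0;
    }
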
nq               5919 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	int nq = bnxt_nq_rings_in_use(bp);
nq               5941 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	    hw_resc->resv_irqs != nq)
nq                140 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c 	unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
nq                154 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c 	for (i = 0; i < nq; i++, q++) {
nq                460 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c 	int nq = txq_info->ntxq;
nq                463 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c 	for (i = 0; i < nq; i++) {
nq                483 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c 	int nq = txq_info->ntxq;
nq                486 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c 	j = nq / adap->params.nports;
nq                487 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c 	for (i = 0; i < nq; i++) {
nq               5173 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		int nq = min(n, 32);
nq               5176 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		cmd.niqid = cpu_to_be16(nq);
nq               5179 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		start += nq;
nq               5180 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		n -= nq;
nq               5182 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		while (nq > 0) {
nq               5196 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 			nq -= 3;
nq               1273 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		int nq = min(n, 32);
nq               1280 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		cmd.niqid = cpu_to_be16(nq);
nq               1286 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		start += nq;
nq               1287 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		n -= nq;
nq               1294 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		while (nq > 0) {
nq               1303 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 			int nqbuf = min(3, nq);
nq               1305 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 			nq -= nqbuf;
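
The Chelsio hits are two different `nq`s. In cxgb4_uld.c it is a plain count of ULD queues to iterate (nrxq + nciq, or ntxq). In t4_hw.c and t4vf_hw.c it is the per-command chunk of an RSS indirection-table write: at most 32 ingress-queue IDs per firmware command, packed three IDs to a 32-bit word, which is why the inner loops step by three (`nq -= 3` / `nqbuf = min(3, nq)`). A standalone sketch of that packing; the 10-bit field width matches my reading of FW_RSS_IND_TBL_CMD and should be treated as an assumption:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack up to three queue IDs into one word; absent slots stay zero. */
    static uint32_t pack3(const uint16_t *ids, int n)
    {
            uint32_t v = 0;

            if (n > 0)
                    v |= (uint32_t)(ids[0] & 0x3ff);
            if (n > 1)
                    v |= (uint32_t)(ids[1] & 0x3ff) << 10;
            if (n > 2)
                    v |= (uint32_t)(ids[2] & 0x3ff) << 20;
            return v;
    }

    int main(void)
    {
            uint16_t ids[4] = { 1, 2, 3, 4 };
            int nq = 4, i = 0;

            while (nq > 0) {
                    int nqbuf = nq < 3 ? nq : 3;    /* as in the t4vf loop */

                    printf("word: %#x\n", pack3(&ids[i], nqbuf));
                    i += nqbuf;
                    nq -= nqbuf;
            }
            return 0;
    }
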
nq                756 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	struct netdev_queue *nq;
nq                833 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	nq = netdev_get_tx_queue(net_dev, queue_mapping);
nq                834 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	netdev_tx_sent_queue(nq, fd_len);
nq                849 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 		netdev_tx_completed_queue(nq, 1, fd_len);
nq               1118 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	struct netdev_queue *nq;
nq               1176 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 		nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
nq               1177 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 		netdev_tx_completed_queue(nq, txc_fq->dq_frames,
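
For dpaa2-eth, and for most of the NIC drivers that follow, `nq` is a `struct netdev_queue *` from netdev_get_tx_queue(). The sent_queue/completed_queue pair above is byte queue limits (BQL) accounting, and the completed_queue call on the transmit path undoes the accounting when the hardware enqueue fails. A kernel-context sketch of that contract; hw_enqueue() is a hypothetical stand-in for the driver's frame-descriptor enqueue:

    static netdev_tx_t xmit_sketch(struct sk_buff *skb, struct net_device *dev)
    {
            struct netdev_queue *nq = netdev_get_tx_queue(dev, skb->queue_mapping);
            unsigned int len = skb->len;

            netdev_tx_sent_queue(nq, len);          /* tell BQL what we queued */
            if (hw_enqueue(skb)) {                  /* hypothetical */
                    /* Enqueue failed: back out the accounting we just did. */
                    netdev_tx_completed_queue(nq, 1, len);
                    dev_kfree_skb_any(skb);
            }
            return NETDEV_TX_OK;
    }
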
nq                804 drivers/net/ethernet/freescale/fec_main.c 	struct netdev_queue *nq;
nq                809 drivers/net/ethernet/freescale/fec_main.c 	nq = netdev_get_tx_queue(ndev, queue);
nq                820 drivers/net/ethernet/freescale/fec_main.c 		netif_tx_stop_queue(nq);
nq               1249 drivers/net/ethernet/freescale/fec_main.c 	struct netdev_queue *nq;
nq               1259 drivers/net/ethernet/freescale/fec_main.c 	nq = netdev_get_tx_queue(ndev, queue_id);
nq               1334 drivers/net/ethernet/freescale/fec_main.c 		if (netif_tx_queue_stopped(nq)) {
nq               1337 drivers/net/ethernet/freescale/fec_main.c 				netif_tx_wake_queue(nq);
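
In fec_main.c the same `netdev_queue` pointer drives flow control: transmit stops the queue when descriptors run out, and the completion handler wakes it once enough have been reclaimed. Sketch of the stop/wake discipline; free_tx_descs() and TX_WAKE_THRESHOLD are hypothetical stand-ins for the driver's own ring bookkeeping:

    static void tx_stop_sketch(struct net_device *ndev, u16 queue)
    {
            struct netdev_queue *nq = netdev_get_tx_queue(ndev, queue);

            if (free_tx_descs(queue) < 1)           /* hypothetical */
                    netif_tx_stop_queue(nq);        /* no room: stop the stack */
    }

    static void tx_complete_sketch(struct net_device *ndev, u16 queue)
    {
            struct netdev_queue *nq = netdev_get_tx_queue(ndev, queue);

            if (netif_tx_queue_stopped(nq) &&
                free_tx_descs(queue) >= TX_WAKE_THRESHOLD)
                    netif_tx_wake_queue(nq);        /* room again: restart */
    }
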
nq                495 drivers/net/ethernet/marvell/mv643xx_eth.c 	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
nq                497 drivers/net/ethernet/marvell/mv643xx_eth.c 	if (netif_tx_queue_stopped(nq)) {
nq                498 drivers/net/ethernet/marvell/mv643xx_eth.c 		__netif_tx_lock(nq, smp_processor_id());
nq                500 drivers/net/ethernet/marvell/mv643xx_eth.c 			netif_tx_wake_queue(nq);
nq                501 drivers/net/ethernet/marvell/mv643xx_eth.c 		__netif_tx_unlock(nq);
nq               1001 drivers/net/ethernet/marvell/mv643xx_eth.c 	struct netdev_queue *nq;
nq               1005 drivers/net/ethernet/marvell/mv643xx_eth.c 	nq = netdev_get_tx_queue(dev, queue);
nq               1024 drivers/net/ethernet/marvell/mv643xx_eth.c 			netif_tx_stop_queue(nq);
nq               1038 drivers/net/ethernet/marvell/mv643xx_eth.c 	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
nq               1042 drivers/net/ethernet/marvell/mv643xx_eth.c 	__netif_tx_lock(nq, smp_processor_id());
nq               1055 drivers/net/ethernet/marvell/mv643xx_eth.c 	__netif_tx_unlock(nq);
nq               1063 drivers/net/ethernet/marvell/mv643xx_eth.c 	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
nq               1066 drivers/net/ethernet/marvell/mv643xx_eth.c 	__netif_tx_lock_bh(nq);
nq               1122 drivers/net/ethernet/marvell/mv643xx_eth.c 	__netif_tx_unlock_bh(nq);
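
mv643xx_eth adds a locking wrinkle: its descriptor reclaim runs outside ndo_start_xmit (from the poll path, or from process context with the _bh variant), so it takes the queue's own xmit lock before touching ring state or waking the queue. Sketch; reclaim_descriptors() and have_room() are hypothetical:

    static void txq_reclaim_sketch(struct net_device *dev, int index)
    {
            struct netdev_queue *nq = netdev_get_tx_queue(dev, index);

            /* Serialize with the xmit path, which holds this same lock. */
            __netif_tx_lock(nq, smp_processor_id());
            reclaim_descriptors(index);             /* hypothetical */
            if (netif_tx_queue_stopped(nq) && have_room(index))
                    netif_tx_wake_queue(nq);
            __netif_tx_unlock(nq);
    }
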
nq               1769 drivers/net/ethernet/marvell/mvneta.c 				 struct netdev_queue *nq)
nq               1795 drivers/net/ethernet/marvell/mvneta.c 	netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
nq               1802 drivers/net/ethernet/marvell/mvneta.c 	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
nq               1809 drivers/net/ethernet/marvell/mvneta.c 	mvneta_txq_bufs_free(pp, txq, tx_done, nq);
nq               1813 drivers/net/ethernet/marvell/mvneta.c 	if (netif_tx_queue_stopped(nq)) {
nq               1815 drivers/net/ethernet/marvell/mvneta.c 			netif_tx_wake_queue(nq);
nq               2478 drivers/net/ethernet/marvell/mvneta.c 		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
nq               2480 drivers/net/ethernet/marvell/mvneta.c 		netdev_tx_sent_queue(nq, len);
nq               2484 drivers/net/ethernet/marvell/mvneta.c 			netif_tx_stop_queue(nq);
nq               2486 drivers/net/ethernet/marvell/mvneta.c 		if (!netdev_xmit_more() || netif_xmit_stopped(nq) ||
nq               2510 drivers/net/ethernet/marvell/mvneta.c 	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
nq               2513 drivers/net/ethernet/marvell/mvneta.c 	mvneta_txq_bufs_free(pp, txq, tx_done, nq);
nq               2527 drivers/net/ethernet/marvell/mvneta.c 	struct netdev_queue *nq;
nq               2533 drivers/net/ethernet/marvell/mvneta.c 		nq = netdev_get_tx_queue(pp->dev, txq->id);
nq               2534 drivers/net/ethernet/marvell/mvneta.c 		__netif_tx_lock(nq, cpu);
nq               2539 drivers/net/ethernet/marvell/mvneta.c 		__netif_tx_unlock(nq);
nq               3071 drivers/net/ethernet/marvell/mvneta.c 	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
nq               3084 drivers/net/ethernet/marvell/mvneta.c 	netdev_tx_reset_queue(nq);
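
The mvneta hits combine the patterns above and add two details: the xmit path only rings the hardware doorbell when netdev_xmit_more() reports the end of a burst (or the queue has been stopped), and teardown calls netdev_tx_reset_queue() so stale BQL state cannot throttle a re-initialized queue. A short sketch of the doorbell coalescing; write_tx_doorbell() is a hypothetical MMIO helper:

    static void maybe_ring_doorbell(struct netdev_queue *nq, int pending)
    {
            /* Batch descriptor writes across a burst of skbs; only the
             * last one (or a stopped queue) pays for the doorbell. */
            if (!netdev_xmit_more() || netif_xmit_stopped(nq))
                    write_tx_doorbell(pending);     /* hypothetical */
    }
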
nq               2270 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
nq               2283 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	if (netif_tx_queue_stopped(nq))
nq               2285 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 			netif_tx_wake_queue(nq);
nq               3274 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
nq               3285 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 			netif_tx_stop_queue(nq);
nq                190 drivers/net/tap.c 	struct tap_queue *nq;
nq                201 drivers/net/tap.c 		nq = rtnl_dereference(tap->taps[tap->numvtaps - 1]);
nq                202 drivers/net/tap.c 		nq->queue_index = index;
nq                204 drivers/net/tap.c 		rcu_assign_pointer(tap->taps[index], nq);
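
In drivers/net/tap.c, `nq` is the last `tap_queue` in the taps[] array: when a queue in the middle goes away, the tail entry is moved into the vacated slot (and republished under RCU) so the array stays dense. A standalone sketch of the swap-with-last idiom, minus the RCU publication:

    #include <stdio.h>

    /* O(1) removal from a dense array: move the tail into the hole. */
    static int remove_queue(int *q, int count, int index)
    {
            q[index] = q[count - 1];
            return count - 1;
    }

    int main(void)
    {
            int q[4] = { 10, 11, 12, 13 };
            int n = remove_queue(q, 4, 1);

            for (int i = 0; i < n; i++)
                    printf("%d ", q[i]);            /* 10 13 12 */
            printf("\n");
            return 0;
    }
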
nq                751 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	int nq = trans->trans_cfg->base_params->num_of_queues;
nq                754 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
nq               3852 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	u32 hq, lq, nq, eq, pubq;
nq               3857 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	nq = 0;
nq               3866 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 		nq = fops->page_num_norm;
nq               3868 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	val32 = (nq << RQPN_NPQ_SHIFT) | (eq << RQPN_EPQ_SHIFT);
nq               3871 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	pubq = fops->total_page_num - hq - lq - nq - 1;
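
Here `nq` is the normal-priority TX queue's page reservation in the chip's shared buffer pool: the high, low, and normal queues get fixed page counts from the fops table, and whatever remains of total_page_num, less one reserved page, becomes the public pool (note the listed pubq line does not subtract eq). Standalone arithmetic sketch with made-up counts:

    #include <stdio.h>

    int main(void)
    {
            unsigned int total = 256;               /* made-up example values */
            unsigned int hq = 32, lq = 32, nq = 64;

            /* One page stays reserved; the rest is the shared public pool. */
            unsigned int pubq = total - hq - lq - nq - 1;

            printf("public pool pages: %u\n", pubq); /* 127 */
            return 0;
    }
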
nq                 70 fs/xfs/xfs_trans_dquot.c 	struct xfs_dqtrx	*oq, *nq;
nq                 96 fs/xfs/xfs_trans_dquot.c 			nq = &nqa[i];
nq                101 fs/xfs/xfs_trans_dquot.c 			nq->qt_dquot = oq->qt_dquot;
nq                102 fs/xfs/xfs_trans_dquot.c 			nq->qt_bcount_delta = nq->qt_icount_delta = 0;
nq                103 fs/xfs/xfs_trans_dquot.c 			nq->qt_rtbcount_delta = 0;
nq                108 fs/xfs/xfs_trans_dquot.c 			nq->qt_blk_res = oq->qt_blk_res - blk_res_used;
nq                111 fs/xfs/xfs_trans_dquot.c 			nq->qt_rtblk_res = oq->qt_rtblk_res -
nq                115 fs/xfs/xfs_trans_dquot.c 			nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used;
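
In xfs_trans_dquot.c, `oq`/`nq` are the old and new `xfs_dqtrx` slots when a transaction is duplicated: the new slot keeps the same dquot, starts with zeroed count deltas, and inherits the still-unused portion of each reservation. Sketch of that carry-over with an abbreviated struct; the write-back of the used portion into the old slot is my assumption about the surrounding code, not shown in the hits:

    struct dqtrx_sketch {
            void            *qt_dquot;
            long            qt_bcount_delta;
            unsigned long   qt_blk_res;
            unsigned long   qt_blk_res_used;
    };

    static void dup_dqtrx(struct dqtrx_sketch *nq, struct dqtrx_sketch *oq)
    {
            unsigned long used = oq->qt_blk_res_used;

            nq->qt_dquot = oq->qt_dquot;            /* same quota target */
            nq->qt_bcount_delta = 0;                /* new tx: no deltas yet */
            nq->qt_blk_res = oq->qt_blk_res - used; /* hand over unused rsvn */
            oq->qt_blk_res = used;                  /* assumed: old tx keeps used part */
    }
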
nq                315 net/sched/sch_api.c 	struct netdev_queue *nq;
nq                324 net/sched/sch_api.c 	nq = dev_ingress_queue_rcu(dev);
nq                325 net/sched/sch_api.c 	if (nq)
nq                326 net/sched/sch_api.c 		q = qdisc_match_from_root(nq->qdisc_sleeping, handle);
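
Finally, in net/sched/sch_api.c `nq` is the device's ingress `netdev_queue`: qdisc lookup falls back to the ingress queue's sleeping qdisc when a handle is not found under the TX root. A kernel-context sketch of that fallback, shaped after the listed lines:

    static struct Qdisc *lookup_sketch(struct net_device *dev, u32 handle)
    {
            struct netdev_queue *nq;
            struct Qdisc *q;

            q = qdisc_match_from_root(dev->qdisc, handle);
            if (q)
                    return q;

            /* Not under the root: try the ingress side, if it exists. */
            nq = dev_ingress_queue_rcu(dev);
            return nq ? qdisc_match_from_root(nq->qdisc_sleeping, handle) : NULL;
    }
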