mq                 92 arch/ia64/include/asm/sn/sn_sal.h sn_mq_watchlist_alloc(int blade, void *mq, unsigned int mq_size,
mq                100 arch/ia64/include/asm/sn/sn_sal.h 	addr = (unsigned long)mq;
mq                 42 arch/powerpc/include/asm/ptrace.h 			unsigned long mq;
mq                 49 arch/powerpc/include/uapi/asm/ptrace.h 	unsigned long mq;		/* 601 only (not used at present) */
mq                 24 arch/powerpc/kernel/ppc32.h 	unsigned int mq;		/* 601 only (not used at present) */
mq               1735 arch/powerpc/kernel/process.c 	regs->mq = 0;
mq                119 arch/powerpc/kernel/ptrace.c 	REG_OFFSET_NAME(mq),
mq               3388 arch/powerpc/kernel/ptrace.c 	BUILD_BUG_ON(offsetof(struct pt_regs, mq) !=
mq               3389 arch/powerpc/kernel/ptrace.c 		     offsetof(struct user_pt_regs, mq));
mq                 63 arch/powerpc/perf/perf_regs.c 	PT_REGS_OFFSET(PERF_REG_POWERPC_SOFTE, mq),
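
The powerpc hits keep the old PowerPC 601 MQ register as a field in both the kernel-internal and the exported register layouts, and ptrace.c pins the two together with an offsetof() build-time check. A standalone sketch of that kind of layout assertion, using hypothetical struct names rather than the kernel's pt_regs/user_pt_regs, might look like this:

    #include <assert.h>
    #include <stddef.h>

    /* Hypothetical kernel-side and uapi-side register blocks (not the kernel's). */
    struct kregs { unsigned long gpr[32]; unsigned long mq; };
    struct uregs { unsigned long gpr[32]; unsigned long mq; };

    /* Compile-time check that both views place mq at the same offset,
     * in the spirit of the BUILD_BUG_ON(offsetof(...) != offsetof(...)) hit above. */
    static_assert(offsetof(struct kregs, mq) == offsetof(struct uregs, mq),
                  "mq must sit at the same offset in both register views");

    int main(void) { return 0; }
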
mq                279 drivers/infiniband/hw/ocrdma/ocrdma.h 	struct ocrdma_mq mq;
mq                124 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	    (dev->mq.cq.va + (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe)));
mq                133 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	dev->mq.cq.tail = (dev->mq.cq.tail + 1) & (OCRDMA_MQ_CQ_LEN - 1);
mq                138 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	return dev->mq.sq.va + (dev->mq.sq.head * sizeof(struct ocrdma_mqe));
mq                143 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	dev->mq.sq.head = (dev->mq.sq.head + 1) & (OCRDMA_MQ_LEN - 1);
mq                148 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	return dev->mq.sq.va + (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe));
mq                325 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	val |= dev->mq.sq.id & OCRDMA_MQ_ID_MASK;
mq                565 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 				struct ocrdma_queue_info *mq,
mq                574 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	num_pages = PAGES_4K_SPANNED(mq->va, mq->size);
mq                589 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
mq                594 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	ocrdma_build_q_pages(pa, num_pages, mq->dma, PAGE_SIZE_4K);
mq                598 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 		mq->id = rsp->id;
mq                599 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 		mq->created = true;
mq                609 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	status = ocrdma_alloc_q(dev, &dev->mq.cq, OCRDMA_MQ_CQ_LEN,
mq                615 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->eq_tbl[0].q);
mq                624 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	status = ocrdma_alloc_q(dev, &dev->mq.sq, OCRDMA_MQ_LEN,
mq                628 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	status = ocrdma_mbx_create_mq(dev, &dev->mq.sq, &dev->mq.cq);
mq                631 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, 0);
mq                635 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	ocrdma_free_q(dev, &dev->mq.sq);
mq                637 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	ocrdma_mbx_delete_q(dev, &dev->mq.cq, QTYPE_CQ);
mq                639 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	ocrdma_free_q(dev, &dev->mq.cq);
mq                650 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	mbxq = &dev->mq.sq;
mq                657 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	cq = &dev->mq.cq;
mq                898 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, cqe_popped);
mq                988 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	if (cq_id == dev->mq.cq.id)
mq               1043 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	dev->mqe_ctx.tag = dev->mq.sq.head;
mq               1046 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	cmd->hdr.tag_lo = dev->mq.sq.head;
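
The ocrdma hits advance the mailbox queue's head and tail with a power-of-two mask, e.g. (tail + 1) & (OCRDMA_MQ_CQ_LEN - 1). A minimal self-contained sketch of that ring-index idiom, with made-up names and a made-up queue length, is below; it illustrates the masking trick only, not the driver itself:

    #include <stdio.h>

    #define RING_LEN 16u    /* must be a power of two for the mask to work */

    /* Advance a ring index modulo RING_LEN without a division,
     * the same idiom as (tail + 1) & (OCRDMA_MQ_CQ_LEN - 1) above. */
    static unsigned int ring_next(unsigned int idx)
    {
            return (idx + 1) & (RING_LEN - 1);
    }

    int main(void)
    {
            unsigned int head = 0;

            for (int i = 0; i < 20; i++)
                    head = ring_next(head);

            printf("head after 20 steps: %u\n", head);      /* 20 % 16 == 4 */
            return 0;
    }
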
mq                 27 drivers/input/joystick/maplecontrol.c static void dc_pad_callback(struct mapleq *mq)
mq                 30 drivers/input/joystick/maplecontrol.c 	struct maple_device *mapledev = mq->dev;
mq                 33 drivers/input/joystick/maplecontrol.c 	unsigned char *res = mq->recvbuf->buf;
mq                125 drivers/input/keyboard/maple_keyb.c static void dc_kbd_callback(struct mapleq *mq)
mq                127 drivers/input/keyboard/maple_keyb.c 	struct maple_device *mapledev = mq->dev;
mq                129 drivers/input/keyboard/maple_keyb.c 	unsigned long *buf = (unsigned long *)(mq->recvbuf->buf);
mq                 27 drivers/input/mouse/maplemouse.c static void dc_mouse_callback(struct mapleq *mq)
mq                 30 drivers/input/mouse/maplemouse.c 	struct maple_device *mapledev = mq->dev;
mq                 33 drivers/input/mouse/maplemouse.c 	unsigned char *res = mq->recvbuf->buf;
mq                 48 drivers/isdn/capi/capilib.c 	struct capilib_msgidqueue *mq;
mq                 49 drivers/isdn/capi/capilib.c 	if ((mq = np->msgidfree) == NULL)
mq                 51 drivers/isdn/capi/capilib.c 	np->msgidfree = mq->next;
mq                 52 drivers/isdn/capi/capilib.c 	mq->msgid = msgid;
mq                 53 drivers/isdn/capi/capilib.c 	mq->next = NULL;
mq                 55 drivers/isdn/capi/capilib.c 		np->msgidlast->next = mq;
mq                 56 drivers/isdn/capi/capilib.c 	np->msgidlast = mq;
mq                 58 drivers/isdn/capi/capilib.c 		np->msgidqueue = mq;
mq                 68 drivers/isdn/capi/capilib.c 			struct capilib_msgidqueue *mq = *pp;
mq                 69 drivers/isdn/capi/capilib.c 			*pp = mq->next;
mq                 70 drivers/isdn/capi/capilib.c 			if (mq == np->msgidlast)
mq                 72 drivers/isdn/capi/capilib.c 			mq->next = np->msgidfree;
mq                 73 drivers/isdn/capi/capilib.c 			np->msgidfree = mq;
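
The capilib hits build the message-id queue as a singly linked FIFO fed from a free list (np->msgidfree), appending at np->msgidlast. A small userspace model of that allocate-from-free-list-and-append pattern follows; the names are hypothetical and, unlike the kernel code, no locking is shown:

    #include <stdio.h>
    #include <stddef.h>

    struct msgid_node {
            unsigned short msgid;
            struct msgid_node *next;
    };

    struct msgid_queue {
            struct msgid_node *head, *tail;  /* queued entries */
            struct msgid_node *free;         /* free list */
            struct msgid_node pool[8];       /* fixed preallocated pool */
    };

    static void queue_init(struct msgid_queue *q)
    {
            q->head = q->tail = NULL;
            q->free = NULL;
            for (size_t i = 0; i < 8; i++) {        /* thread pool entries onto the free list */
                    q->pool[i].next = q->free;
                    q->free = &q->pool[i];
            }
    }

    /* Take a node from the free list and append it at the tail; 0 means the pool is exhausted. */
    static int queue_put(struct msgid_queue *q, unsigned short msgid)
    {
            struct msgid_node *n = q->free;

            if (!n)
                    return 0;
            q->free = n->next;
            n->msgid = msgid;
            n->next = NULL;
            if (q->tail)
                    q->tail->next = n;
            q->tail = n;
            if (!q->head)
                    q->head = n;
            return 1;
    }

    int main(void)
    {
            struct msgid_queue q;

            queue_init(&q);
            queue_put(&q, 42);
            queue_put(&q, 43);
            printf("first queued msgid: %u\n", (unsigned)q.head->msgid);
            return 0;
    }
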
mq                257 drivers/mailbox/omap-mailbox.c 	struct omap_mbox_queue *mq =
mq                263 drivers/mailbox/omap-mailbox.c 	while (kfifo_len(&mq->fifo) >= sizeof(msg)) {
mq                264 drivers/mailbox/omap-mailbox.c 		len = kfifo_out(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
mq                268 drivers/mailbox/omap-mailbox.c 		mbox_chan_received_data(mq->mbox->chan, (void *)data);
mq                269 drivers/mailbox/omap-mailbox.c 		spin_lock_irq(&mq->lock);
mq                270 drivers/mailbox/omap-mailbox.c 		if (mq->full) {
mq                271 drivers/mailbox/omap-mailbox.c 			mq->full = false;
mq                272 drivers/mailbox/omap-mailbox.c 			_omap_mbox_enable_irq(mq->mbox, IRQ_RX);
mq                274 drivers/mailbox/omap-mailbox.c 		spin_unlock_irq(&mq->lock);
mq                290 drivers/mailbox/omap-mailbox.c 	struct omap_mbox_queue *mq = mbox->rxq;
mq                295 drivers/mailbox/omap-mailbox.c 		if (unlikely(kfifo_avail(&mq->fifo) < sizeof(msg))) {
mq                297 drivers/mailbox/omap-mailbox.c 			mq->full = true;
mq                303 drivers/mailbox/omap-mailbox.c 		len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
mq                329 drivers/mailbox/omap-mailbox.c 	struct omap_mbox_queue *mq;
mq                334 drivers/mailbox/omap-mailbox.c 	mq = kzalloc(sizeof(*mq), GFP_KERNEL);
mq                335 drivers/mailbox/omap-mailbox.c 	if (!mq)
mq                338 drivers/mailbox/omap-mailbox.c 	spin_lock_init(&mq->lock);
mq                340 drivers/mailbox/omap-mailbox.c 	if (kfifo_alloc(&mq->fifo, mbox_kfifo_size, GFP_KERNEL))
mq                343 drivers/mailbox/omap-mailbox.c 	INIT_WORK(&mq->work, work);
mq                344 drivers/mailbox/omap-mailbox.c 	return mq;
mq                347 drivers/mailbox/omap-mailbox.c 	kfree(mq);
mq                360 drivers/mailbox/omap-mailbox.c 	struct omap_mbox_queue *mq;
mq                362 drivers/mailbox/omap-mailbox.c 	mq = mbox_queue_alloc(mbox, mbox_rx_work);
mq                363 drivers/mailbox/omap-mailbox.c 	if (!mq)
mq                365 drivers/mailbox/omap-mailbox.c 	mbox->rxq = mq;
mq                366 drivers/mailbox/omap-mailbox.c 	mq->mbox = mbox;
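
The omap-mailbox hits buffer incoming messages in a kfifo, set mq->full when space runs out, and re-enable the RX interrupt once the worker has drained the fifo. A tiny sketch of that throttling idea, using a plain ring array in place of a kfifo and a boolean in place of the interrupt mask (all names illustrative), is below:

    #include <stdbool.h>
    #include <stdio.h>

    #define FIFO_DEPTH 4

    struct rx_queue {
            unsigned int fifo[FIFO_DEPTH];
            unsigned int head, tail, count;
            bool full;              /* producer throttled until consumer drains */
            bool irq_enabled;       /* stands in for the RX interrupt mask */
    };

    /* Producer path: queue a message, or mark the queue full and "mask the irq". */
    static void rx_push(struct rx_queue *q, unsigned int msg)
    {
            if (q->count == FIFO_DEPTH) {
                    q->full = true;
                    q->irq_enabled = false;
                    return;
            }
            q->fifo[q->head] = msg;
            q->head = (q->head + 1) % FIFO_DEPTH;
            q->count++;
    }

    /* Consumer path: drain one message and re-enable the producer if it was throttled. */
    static bool rx_pop(struct rx_queue *q, unsigned int *msg)
    {
            if (!q->count)
                    return false;
            *msg = q->fifo[q->tail];
            q->tail = (q->tail + 1) % FIFO_DEPTH;
            q->count--;
            if (q->full) {
                    q->full = false;
                    q->irq_enabled = true;
            }
            return true;
    }

    int main(void)
    {
            struct rx_queue q = { .irq_enabled = true };
            unsigned int msg;

            for (unsigned int i = 0; i < 6; i++)
                    rx_push(&q, i);         /* the last two pushes hit the full path */
            while (rx_pop(&q, &msg))
                    printf("got %u (irq_enabled=%d)\n", msg, q.irq_enabled);
            return 0;
    }
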
mq                867 drivers/md/dm-cache-policy-smq.c static struct entry *writeback_sentinel(struct smq_policy *mq, unsigned level)
mq                869 drivers/md/dm-cache-policy-smq.c 	return get_sentinel(&mq->writeback_sentinel_alloc, level, mq->current_writeback_sentinels);
mq                872 drivers/md/dm-cache-policy-smq.c static struct entry *demote_sentinel(struct smq_policy *mq, unsigned level)
mq                874 drivers/md/dm-cache-policy-smq.c 	return get_sentinel(&mq->demote_sentinel_alloc, level, mq->current_demote_sentinels);
mq                877 drivers/md/dm-cache-policy-smq.c static void __update_writeback_sentinels(struct smq_policy *mq)
mq                880 drivers/md/dm-cache-policy-smq.c 	struct queue *q = &mq->dirty;
mq                884 drivers/md/dm-cache-policy-smq.c 		sentinel = writeback_sentinel(mq, level);
mq                890 drivers/md/dm-cache-policy-smq.c static void __update_demote_sentinels(struct smq_policy *mq)
mq                893 drivers/md/dm-cache-policy-smq.c 	struct queue *q = &mq->clean;
mq                897 drivers/md/dm-cache-policy-smq.c 		sentinel = demote_sentinel(mq, level);
mq                903 drivers/md/dm-cache-policy-smq.c static void update_sentinels(struct smq_policy *mq)
mq                905 drivers/md/dm-cache-policy-smq.c 	if (time_after(jiffies, mq->next_writeback_period)) {
mq                906 drivers/md/dm-cache-policy-smq.c 		mq->next_writeback_period = jiffies + WRITEBACK_PERIOD;
mq                907 drivers/md/dm-cache-policy-smq.c 		mq->current_writeback_sentinels = !mq->current_writeback_sentinels;
mq                908 drivers/md/dm-cache-policy-smq.c 		__update_writeback_sentinels(mq);
mq                911 drivers/md/dm-cache-policy-smq.c 	if (time_after(jiffies, mq->next_demote_period)) {
mq                912 drivers/md/dm-cache-policy-smq.c 		mq->next_demote_period = jiffies + DEMOTE_PERIOD;
mq                913 drivers/md/dm-cache-policy-smq.c 		mq->current_demote_sentinels = !mq->current_demote_sentinels;
mq                914 drivers/md/dm-cache-policy-smq.c 		__update_demote_sentinels(mq);
mq                918 drivers/md/dm-cache-policy-smq.c static void __sentinels_init(struct smq_policy *mq)
mq                924 drivers/md/dm-cache-policy-smq.c 		sentinel = writeback_sentinel(mq, level);
mq                926 drivers/md/dm-cache-policy-smq.c 		q_push(&mq->dirty, sentinel);
mq                928 drivers/md/dm-cache-policy-smq.c 		sentinel = demote_sentinel(mq, level);
mq                930 drivers/md/dm-cache-policy-smq.c 		q_push(&mq->clean, sentinel);
mq                934 drivers/md/dm-cache-policy-smq.c static void sentinels_init(struct smq_policy *mq)
mq                936 drivers/md/dm-cache-policy-smq.c 	mq->next_writeback_period = jiffies + WRITEBACK_PERIOD;
mq                937 drivers/md/dm-cache-policy-smq.c 	mq->next_demote_period = jiffies + DEMOTE_PERIOD;
mq                939 drivers/md/dm-cache-policy-smq.c 	mq->current_writeback_sentinels = false;
mq                940 drivers/md/dm-cache-policy-smq.c 	mq->current_demote_sentinels = false;
mq                941 drivers/md/dm-cache-policy-smq.c 	__sentinels_init(mq);
mq                943 drivers/md/dm-cache-policy-smq.c 	mq->current_writeback_sentinels = !mq->current_writeback_sentinels;
mq                944 drivers/md/dm-cache-policy-smq.c 	mq->current_demote_sentinels = !mq->current_demote_sentinels;
mq                945 drivers/md/dm-cache-policy-smq.c 	__sentinels_init(mq);
mq                950 drivers/md/dm-cache-policy-smq.c static void del_queue(struct smq_policy *mq, struct entry *e)
mq                952 drivers/md/dm-cache-policy-smq.c 	q_del(e->dirty ? &mq->dirty : &mq->clean, e);
mq                955 drivers/md/dm-cache-policy-smq.c static void push_queue(struct smq_policy *mq, struct entry *e)
mq                958 drivers/md/dm-cache-policy-smq.c 		q_push(&mq->dirty, e);
mq                960 drivers/md/dm-cache-policy-smq.c 		q_push(&mq->clean, e);
mq                964 drivers/md/dm-cache-policy-smq.c static void push(struct smq_policy *mq, struct entry *e)
mq                966 drivers/md/dm-cache-policy-smq.c 	h_insert(&mq->table, e);
mq                968 drivers/md/dm-cache-policy-smq.c 		push_queue(mq, e);
mq                971 drivers/md/dm-cache-policy-smq.c static void push_queue_front(struct smq_policy *mq, struct entry *e)
mq                974 drivers/md/dm-cache-policy-smq.c 		q_push_front(&mq->dirty, e);
mq                976 drivers/md/dm-cache-policy-smq.c 		q_push_front(&mq->clean, e);
mq                979 drivers/md/dm-cache-policy-smq.c static void push_front(struct smq_policy *mq, struct entry *e)
mq                981 drivers/md/dm-cache-policy-smq.c 	h_insert(&mq->table, e);
mq                983 drivers/md/dm-cache-policy-smq.c 		push_queue_front(mq, e);
mq                986 drivers/md/dm-cache-policy-smq.c static dm_cblock_t infer_cblock(struct smq_policy *mq, struct entry *e)
mq                988 drivers/md/dm-cache-policy-smq.c 	return to_cblock(get_index(&mq->cache_alloc, e));
mq                991 drivers/md/dm-cache-policy-smq.c static void requeue(struct smq_policy *mq, struct entry *e)
mq                999 drivers/md/dm-cache-policy-smq.c 	if (!test_and_set_bit(from_cblock(infer_cblock(mq, e)), mq->cache_hit_bits)) {
mq               1001 drivers/md/dm-cache-policy-smq.c 			q_requeue(&mq->clean, e, 1u, NULL, NULL);
mq               1005 drivers/md/dm-cache-policy-smq.c 		q_requeue(&mq->dirty, e, 1u,
mq               1006 drivers/md/dm-cache-policy-smq.c 			  get_sentinel(&mq->writeback_sentinel_alloc, e->level, !mq->current_writeback_sentinels),
mq               1007 drivers/md/dm-cache-policy-smq.c 			  get_sentinel(&mq->writeback_sentinel_alloc, e->level, mq->current_writeback_sentinels));
mq               1011 drivers/md/dm-cache-policy-smq.c static unsigned default_promote_level(struct smq_policy *mq)
mq               1031 drivers/md/dm-cache-policy-smq.c 	unsigned hits = mq->cache_stats.hits;
mq               1032 drivers/md/dm-cache-policy-smq.c 	unsigned misses = mq->cache_stats.misses;
mq               1037 drivers/md/dm-cache-policy-smq.c static void update_promote_levels(struct smq_policy *mq)
mq               1043 drivers/md/dm-cache-policy-smq.c 	unsigned threshold_level = allocator_empty(&mq->cache_alloc) ?
mq               1044 drivers/md/dm-cache-policy-smq.c 		default_promote_level(mq) : (NR_HOTSPOT_LEVELS / 2u);
mq               1053 drivers/md/dm-cache-policy-smq.c 	switch (stats_assess(&mq->hotspot_stats)) {
mq               1066 drivers/md/dm-cache-policy-smq.c 	mq->read_promote_level = NR_HOTSPOT_LEVELS - threshold_level;
mq               1067 drivers/md/dm-cache-policy-smq.c 	mq->write_promote_level = (NR_HOTSPOT_LEVELS - threshold_level);
mq               1074 drivers/md/dm-cache-policy-smq.c static void update_level_jump(struct smq_policy *mq)
mq               1076 drivers/md/dm-cache-policy-smq.c 	switch (stats_assess(&mq->hotspot_stats)) {
mq               1078 drivers/md/dm-cache-policy-smq.c 		mq->hotspot_level_jump = 4u;
mq               1082 drivers/md/dm-cache-policy-smq.c 		mq->hotspot_level_jump = 2u;
mq               1086 drivers/md/dm-cache-policy-smq.c 		mq->hotspot_level_jump = 1u;
mq               1091 drivers/md/dm-cache-policy-smq.c static void end_hotspot_period(struct smq_policy *mq)
mq               1093 drivers/md/dm-cache-policy-smq.c 	clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks);
mq               1094 drivers/md/dm-cache-policy-smq.c 	update_promote_levels(mq);
mq               1096 drivers/md/dm-cache-policy-smq.c 	if (time_after(jiffies, mq->next_hotspot_period)) {
mq               1097 drivers/md/dm-cache-policy-smq.c 		update_level_jump(mq);
mq               1098 drivers/md/dm-cache-policy-smq.c 		q_redistribute(&mq->hotspot);
mq               1099 drivers/md/dm-cache-policy-smq.c 		stats_reset(&mq->hotspot_stats);
mq               1100 drivers/md/dm-cache-policy-smq.c 		mq->next_hotspot_period = jiffies + HOTSPOT_UPDATE_PERIOD;
mq               1104 drivers/md/dm-cache-policy-smq.c static void end_cache_period(struct smq_policy *mq)
mq               1106 drivers/md/dm-cache-policy-smq.c 	if (time_after(jiffies, mq->next_cache_period)) {
mq               1107 drivers/md/dm-cache-policy-smq.c 		clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size));
mq               1109 drivers/md/dm-cache-policy-smq.c 		q_redistribute(&mq->dirty);
mq               1110 drivers/md/dm-cache-policy-smq.c 		q_redistribute(&mq->clean);
mq               1111 drivers/md/dm-cache-policy-smq.c 		stats_reset(&mq->cache_stats);
mq               1113 drivers/md/dm-cache-policy-smq.c 		mq->next_cache_period = jiffies + CACHE_UPDATE_PERIOD;
mq               1125 drivers/md/dm-cache-policy-smq.c static unsigned percent_to_target(struct smq_policy *mq, unsigned p)
mq               1127 drivers/md/dm-cache-policy-smq.c 	return from_cblock(mq->cache_size) * p / 100u;
mq               1130 drivers/md/dm-cache-policy-smq.c static bool clean_target_met(struct smq_policy *mq, bool idle)
mq               1140 drivers/md/dm-cache-policy-smq.c 		return q_size(&mq->dirty) == 0u;
mq               1149 drivers/md/dm-cache-policy-smq.c static bool free_target_met(struct smq_policy *mq)
mq               1153 drivers/md/dm-cache-policy-smq.c 	nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
mq               1154 drivers/md/dm-cache-policy-smq.c 	return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
mq               1155 drivers/md/dm-cache-policy-smq.c 		percent_to_target(mq, FREE_TARGET);
mq               1160 drivers/md/dm-cache-policy-smq.c static void mark_pending(struct smq_policy *mq, struct entry *e)
mq               1168 drivers/md/dm-cache-policy-smq.c static void clear_pending(struct smq_policy *mq, struct entry *e)
mq               1174 drivers/md/dm-cache-policy-smq.c static void queue_writeback(struct smq_policy *mq, bool idle)
mq               1180 drivers/md/dm-cache-policy-smq.c 	e = q_peek(&mq->dirty, mq->dirty.nr_levels, idle);
mq               1182 drivers/md/dm-cache-policy-smq.c 		mark_pending(mq, e);
mq               1183 drivers/md/dm-cache-policy-smq.c 		q_del(&mq->dirty, e);
mq               1187 drivers/md/dm-cache-policy-smq.c 		work.cblock = infer_cblock(mq, e);
mq               1189 drivers/md/dm-cache-policy-smq.c 		r = btracker_queue(mq->bg_work, &work, NULL);
mq               1191 drivers/md/dm-cache-policy-smq.c 			clear_pending(mq, e);
mq               1192 drivers/md/dm-cache-policy-smq.c 			q_push_front(&mq->dirty, e);
mq               1197 drivers/md/dm-cache-policy-smq.c static void queue_demotion(struct smq_policy *mq)
mq               1203 drivers/md/dm-cache-policy-smq.c 	if (WARN_ON_ONCE(!mq->migrations_allowed))
mq               1206 drivers/md/dm-cache-policy-smq.c 	e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true);
mq               1208 drivers/md/dm-cache-policy-smq.c 		if (!clean_target_met(mq, true))
mq               1209 drivers/md/dm-cache-policy-smq.c 			queue_writeback(mq, false);
mq               1213 drivers/md/dm-cache-policy-smq.c 	mark_pending(mq, e);
mq               1214 drivers/md/dm-cache-policy-smq.c 	q_del(&mq->clean, e);
mq               1218 drivers/md/dm-cache-policy-smq.c 	work.cblock = infer_cblock(mq, e);
mq               1219 drivers/md/dm-cache-policy-smq.c 	r = btracker_queue(mq->bg_work, &work, NULL);
mq               1221 drivers/md/dm-cache-policy-smq.c 		clear_pending(mq, e);
mq               1222 drivers/md/dm-cache-policy-smq.c 		q_push_front(&mq->clean, e);
mq               1226 drivers/md/dm-cache-policy-smq.c static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
mq               1233 drivers/md/dm-cache-policy-smq.c 	if (!mq->migrations_allowed)
mq               1236 drivers/md/dm-cache-policy-smq.c 	if (allocator_empty(&mq->cache_alloc)) {
mq               1241 drivers/md/dm-cache-policy-smq.c 		if (!free_target_met(mq))
mq               1242 drivers/md/dm-cache-policy-smq.c 			queue_demotion(mq);
mq               1246 drivers/md/dm-cache-policy-smq.c 	if (btracker_promotion_already_present(mq->bg_work, oblock))
mq               1253 drivers/md/dm-cache-policy-smq.c 	e = alloc_entry(&mq->cache_alloc);
mq               1258 drivers/md/dm-cache-policy-smq.c 	work.cblock = infer_cblock(mq, e);
mq               1259 drivers/md/dm-cache-policy-smq.c 	r = btracker_queue(mq->bg_work, &work, workp);
mq               1261 drivers/md/dm-cache-policy-smq.c 		free_entry(&mq->cache_alloc, e);
mq               1280 drivers/md/dm-cache-policy-smq.c static enum promote_result should_promote(struct smq_policy *mq, struct entry *hs_e,
mq               1284 drivers/md/dm-cache-policy-smq.c 		if (!allocator_empty(&mq->cache_alloc) && fast_promote)
mq               1287 drivers/md/dm-cache-policy-smq.c 		return maybe_promote(hs_e->level >= mq->write_promote_level);
mq               1289 drivers/md/dm-cache-policy-smq.c 		return maybe_promote(hs_e->level >= mq->read_promote_level);
mq               1292 drivers/md/dm-cache-policy-smq.c static dm_oblock_t to_hblock(struct smq_policy *mq, dm_oblock_t b)
mq               1295 drivers/md/dm-cache-policy-smq.c 	(void) sector_div(r, mq->cache_blocks_per_hotspot_block);
mq               1299 drivers/md/dm-cache-policy-smq.c static struct entry *update_hotspot_queue(struct smq_policy *mq, dm_oblock_t b)
mq               1302 drivers/md/dm-cache-policy-smq.c 	dm_oblock_t hb = to_hblock(mq, b);
mq               1303 drivers/md/dm-cache-policy-smq.c 	struct entry *e = h_lookup(&mq->hotspot_table, hb);
mq               1306 drivers/md/dm-cache-policy-smq.c 		stats_level_accessed(&mq->hotspot_stats, e->level);
mq               1308 drivers/md/dm-cache-policy-smq.c 		hi = get_index(&mq->hotspot_alloc, e);
mq               1309 drivers/md/dm-cache-policy-smq.c 		q_requeue(&mq->hotspot, e,
mq               1310 drivers/md/dm-cache-policy-smq.c 			  test_and_set_bit(hi, mq->hotspot_hit_bits) ?
mq               1311 drivers/md/dm-cache-policy-smq.c 			  0u : mq->hotspot_level_jump,
mq               1315 drivers/md/dm-cache-policy-smq.c 		stats_miss(&mq->hotspot_stats);
mq               1317 drivers/md/dm-cache-policy-smq.c 		e = alloc_entry(&mq->hotspot_alloc);
mq               1319 drivers/md/dm-cache-policy-smq.c 			e = q_pop(&mq->hotspot);
mq               1321 drivers/md/dm-cache-policy-smq.c 				h_remove(&mq->hotspot_table, e);
mq               1322 drivers/md/dm-cache-policy-smq.c 				hi = get_index(&mq->hotspot_alloc, e);
mq               1323 drivers/md/dm-cache-policy-smq.c 				clear_bit(hi, mq->hotspot_hit_bits);
mq               1330 drivers/md/dm-cache-policy-smq.c 			q_push(&mq->hotspot, e);
mq               1331 drivers/md/dm-cache-policy-smq.c 			h_insert(&mq->hotspot_table, e);
mq               1352 drivers/md/dm-cache-policy-smq.c 	struct smq_policy *mq = to_smq_policy(p);
mq               1354 drivers/md/dm-cache-policy-smq.c 	btracker_destroy(mq->bg_work);
mq               1355 drivers/md/dm-cache-policy-smq.c 	h_exit(&mq->hotspot_table);
mq               1356 drivers/md/dm-cache-policy-smq.c 	h_exit(&mq->table);
mq               1357 drivers/md/dm-cache-policy-smq.c 	free_bitset(mq->hotspot_hit_bits);
mq               1358 drivers/md/dm-cache-policy-smq.c 	free_bitset(mq->cache_hit_bits);
mq               1359 drivers/md/dm-cache-policy-smq.c 	space_exit(&mq->es);
mq               1360 drivers/md/dm-cache-policy-smq.c 	kfree(mq);
mq               1365 drivers/md/dm-cache-policy-smq.c static int __lookup(struct smq_policy *mq, dm_oblock_t oblock, dm_cblock_t *cblock,
mq               1374 drivers/md/dm-cache-policy-smq.c 	e = h_lookup(&mq->table, oblock);
mq               1376 drivers/md/dm-cache-policy-smq.c 		stats_level_accessed(&mq->cache_stats, e->level);
mq               1378 drivers/md/dm-cache-policy-smq.c 		requeue(mq, e);
mq               1379 drivers/md/dm-cache-policy-smq.c 		*cblock = infer_cblock(mq, e);
mq               1383 drivers/md/dm-cache-policy-smq.c 		stats_miss(&mq->cache_stats);
mq               1388 drivers/md/dm-cache-policy-smq.c 		hs_e = update_hotspot_queue(mq, oblock);
mq               1390 drivers/md/dm-cache-policy-smq.c 		pr = should_promote(mq, hs_e, data_dir, fast_copy);
mq               1392 drivers/md/dm-cache-policy-smq.c 			queue_promotion(mq, oblock, work);
mq               1406 drivers/md/dm-cache-policy-smq.c 	struct smq_policy *mq = to_smq_policy(p);
mq               1408 drivers/md/dm-cache-policy-smq.c 	spin_lock_irqsave(&mq->lock, flags);
mq               1409 drivers/md/dm-cache-policy-smq.c 	r = __lookup(mq, oblock, cblock,
mq               1412 drivers/md/dm-cache-policy-smq.c 	spin_unlock_irqrestore(&mq->lock, flags);
mq               1425 drivers/md/dm-cache-policy-smq.c 	struct smq_policy *mq = to_smq_policy(p);
mq               1427 drivers/md/dm-cache-policy-smq.c 	spin_lock_irqsave(&mq->lock, flags);
mq               1428 drivers/md/dm-cache-policy-smq.c 	r = __lookup(mq, oblock, cblock, data_dir, fast_copy, work, &background_queued);
mq               1429 drivers/md/dm-cache-policy-smq.c 	spin_unlock_irqrestore(&mq->lock, flags);
mq               1439 drivers/md/dm-cache-policy-smq.c 	struct smq_policy *mq = to_smq_policy(p);
mq               1441 drivers/md/dm-cache-policy-smq.c 	spin_lock_irqsave(&mq->lock, flags);
mq               1442 drivers/md/dm-cache-policy-smq.c 	r = btracker_issue(mq->bg_work, result);
mq               1444 drivers/md/dm-cache-policy-smq.c 		if (!clean_target_met(mq, idle)) {
mq               1445 drivers/md/dm-cache-policy-smq.c 			queue_writeback(mq, idle);
mq               1446 drivers/md/dm-cache-policy-smq.c 			r = btracker_issue(mq->bg_work, result);
mq               1449 drivers/md/dm-cache-policy-smq.c 	spin_unlock_irqrestore(&mq->lock, flags);
mq               1458 drivers/md/dm-cache-policy-smq.c static void __complete_background_work(struct smq_policy *mq,
mq               1462 drivers/md/dm-cache-policy-smq.c 	struct entry *e = get_entry(&mq->cache_alloc,
mq               1468 drivers/md/dm-cache-policy-smq.c 		clear_pending(mq, e);
mq               1472 drivers/md/dm-cache-policy-smq.c 			push(mq, e);
mq               1475 drivers/md/dm-cache-policy-smq.c 			free_entry(&mq->cache_alloc, e);
mq               1483 drivers/md/dm-cache-policy-smq.c 			h_remove(&mq->table, e);
mq               1484 drivers/md/dm-cache-policy-smq.c 			free_entry(&mq->cache_alloc, e);
mq               1487 drivers/md/dm-cache-policy-smq.c 			clear_pending(mq, e);
mq               1488 drivers/md/dm-cache-policy-smq.c 			push_queue(mq, e);
mq               1495 drivers/md/dm-cache-policy-smq.c 		clear_pending(mq, e);
mq               1496 drivers/md/dm-cache-policy-smq.c 		push_queue(mq, e);
mq               1501 drivers/md/dm-cache-policy-smq.c 	btracker_complete(mq->bg_work, work);
mq               1509 drivers/md/dm-cache-policy-smq.c 	struct smq_policy *mq = to_smq_policy(p);
mq               1511 drivers/md/dm-cache-policy-smq.c 	spin_lock_irqsave(&mq->lock, flags);
mq               1512 drivers/md/dm-cache-policy-smq.c 	__complete_background_work(mq, work, success);
mq               1513 drivers/md/dm-cache-policy-smq.c 	spin_unlock_irqrestore(&mq->lock, flags);
mq               1517 drivers/md/dm-cache-policy-smq.c static void __smq_set_clear_dirty(struct smq_policy *mq, dm_cblock_t cblock, bool set)
mq               1519 drivers/md/dm-cache-policy-smq.c 	struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));
mq               1524 drivers/md/dm-cache-policy-smq.c 		del_queue(mq, e);
mq               1526 drivers/md/dm-cache-policy-smq.c 		push_queue(mq, e);
mq               1533 drivers/md/dm-cache-policy-smq.c 	struct smq_policy *mq = to_smq_policy(p);
mq               1535 drivers/md/dm-cache-policy-smq.c 	spin_lock_irqsave(&mq->lock, flags);
mq               1536 drivers/md/dm-cache-policy-smq.c 	__smq_set_clear_dirty(mq, cblock, true);
mq               1537 drivers/md/dm-cache-policy-smq.c 	spin_unlock_irqrestore(&mq->lock, flags);
mq               1542 drivers/md/dm-cache-policy-smq.c 	struct smq_policy *mq = to_smq_policy(p);
mq               1545 drivers/md/dm-cache-policy-smq.c 	spin_lock_irqsave(&mq->lock, flags);
mq               1546 drivers/md/dm-cache-policy-smq.c 	__smq_set_clear_dirty(mq, cblock, false);
mq               1547 drivers/md/dm-cache-policy-smq.c 	spin_unlock_irqrestore(&mq->lock, flags);
mq               1559 drivers/md/dm-cache-policy-smq.c 	struct smq_policy *mq = to_smq_policy(p);
mq               1562 drivers/md/dm-cache-policy-smq.c 	e = alloc_particular_entry(&mq->cache_alloc, from_cblock(cblock));
mq               1572 drivers/md/dm-cache-policy-smq.c 	push_front(mq, e);
mq               1579 drivers/md/dm-cache-policy-smq.c 	struct smq_policy *mq = to_smq_policy(p);
mq               1580 drivers/md/dm-cache-policy-smq.c 	struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));
mq               1586 drivers/md/dm-cache-policy-smq.c 	del_queue(mq, e);
mq               1587 drivers/md/dm-cache-policy-smq.c 	h_remove(&mq->table, e);
mq               1588 drivers/md/dm-cache-policy-smq.c 	free_entry(&mq->cache_alloc, e);
mq               1594 drivers/md/dm-cache-policy-smq.c 	struct smq_policy *mq = to_smq_policy(p);
mq               1595 drivers/md/dm-cache-policy-smq.c 	struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));
mq               1607 drivers/md/dm-cache-policy-smq.c 	struct smq_policy *mq = to_smq_policy(p);
mq               1609 drivers/md/dm-cache-policy-smq.c 	spin_lock_irqsave(&mq->lock, flags);
mq               1610 drivers/md/dm-cache-policy-smq.c 	r = to_cblock(mq->cache_alloc.nr_allocated);
mq               1611 drivers/md/dm-cache-policy-smq.c 	spin_unlock_irqrestore(&mq->lock, flags);
mq               1618 drivers/md/dm-cache-policy-smq.c 	struct smq_policy *mq = to_smq_policy(p);
mq               1621 drivers/md/dm-cache-policy-smq.c 	spin_lock_irqsave(&mq->lock, flags);
mq               1622 drivers/md/dm-cache-policy-smq.c 	mq->tick++;
mq               1623 drivers/md/dm-cache-policy-smq.c 	update_sentinels(mq);
mq               1624 drivers/md/dm-cache-policy-smq.c 	end_hotspot_period(mq);
mq               1625 drivers/md/dm-cache-policy-smq.c 	end_cache_period(mq);
mq               1626 drivers/md/dm-cache-policy-smq.c 	spin_unlock_irqrestore(&mq->lock, flags);
mq               1631 drivers/md/dm-cache-policy-smq.c 	struct smq_policy *mq = to_smq_policy(p);
mq               1632 drivers/md/dm-cache-policy-smq.c 	mq->migrations_allowed = allow;
mq               1676 drivers/md/dm-cache-policy-smq.c static void init_policy_functions(struct smq_policy *mq, bool mimic_mq)
mq               1678 drivers/md/dm-cache-policy-smq.c 	mq->policy.destroy = smq_destroy;
mq               1679 drivers/md/dm-cache-policy-smq.c 	mq->policy.lookup = smq_lookup;
mq               1680 drivers/md/dm-cache-policy-smq.c 	mq->policy.lookup_with_work = smq_lookup_with_work;
mq               1681 drivers/md/dm-cache-policy-smq.c 	mq->policy.get_background_work = smq_get_background_work;
mq               1682 drivers/md/dm-cache-policy-smq.c 	mq->policy.complete_background_work = smq_complete_background_work;
mq               1683 drivers/md/dm-cache-policy-smq.c 	mq->policy.set_dirty = smq_set_dirty;
mq               1684 drivers/md/dm-cache-policy-smq.c 	mq->policy.clear_dirty = smq_clear_dirty;
mq               1685 drivers/md/dm-cache-policy-smq.c 	mq->policy.load_mapping = smq_load_mapping;
mq               1686 drivers/md/dm-cache-policy-smq.c 	mq->policy.invalidate_mapping = smq_invalidate_mapping;
mq               1687 drivers/md/dm-cache-policy-smq.c 	mq->policy.get_hint = smq_get_hint;
mq               1688 drivers/md/dm-cache-policy-smq.c 	mq->policy.residency = smq_residency;
mq               1689 drivers/md/dm-cache-policy-smq.c 	mq->policy.tick = smq_tick;
mq               1690 drivers/md/dm-cache-policy-smq.c 	mq->policy.allow_migrations = smq_allow_migrations;
mq               1693 drivers/md/dm-cache-policy-smq.c 		mq->policy.set_config_value = mq_set_config_value;
mq               1694 drivers/md/dm-cache-policy-smq.c 		mq->policy.emit_config_values = mq_emit_config_values;
mq               1728 drivers/md/dm-cache-policy-smq.c 	struct smq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
mq               1730 drivers/md/dm-cache-policy-smq.c 	if (!mq)
mq               1733 drivers/md/dm-cache-policy-smq.c 	init_policy_functions(mq, mimic_mq);
mq               1734 drivers/md/dm-cache-policy-smq.c 	mq->cache_size = cache_size;
mq               1735 drivers/md/dm-cache-policy-smq.c 	mq->cache_block_size = cache_block_size;
mq               1738 drivers/md/dm-cache-policy-smq.c 			    &mq->hotspot_block_size, &mq->nr_hotspot_blocks);
mq               1740 drivers/md/dm-cache-policy-smq.c 	mq->cache_blocks_per_hotspot_block = div64_u64(mq->hotspot_block_size, mq->cache_block_size);
mq               1741 drivers/md/dm-cache-policy-smq.c 	mq->hotspot_level_jump = 1u;
mq               1742 drivers/md/dm-cache-policy-smq.c 	if (space_init(&mq->es, total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size))) {
mq               1747 drivers/md/dm-cache-policy-smq.c 	init_allocator(&mq->writeback_sentinel_alloc, &mq->es, 0, nr_sentinels_per_queue);
mq               1749 drivers/md/dm-cache-policy-smq.c 		get_entry(&mq->writeback_sentinel_alloc, i)->sentinel = true;
mq               1751 drivers/md/dm-cache-policy-smq.c 	init_allocator(&mq->demote_sentinel_alloc, &mq->es, nr_sentinels_per_queue, total_sentinels);
mq               1753 drivers/md/dm-cache-policy-smq.c 		get_entry(&mq->demote_sentinel_alloc, i)->sentinel = true;
mq               1755 drivers/md/dm-cache-policy-smq.c 	init_allocator(&mq->hotspot_alloc, &mq->es, total_sentinels,
mq               1756 drivers/md/dm-cache-policy-smq.c 		       total_sentinels + mq->nr_hotspot_blocks);
mq               1758 drivers/md/dm-cache-policy-smq.c 	init_allocator(&mq->cache_alloc, &mq->es,
mq               1759 drivers/md/dm-cache-policy-smq.c 		       total_sentinels + mq->nr_hotspot_blocks,
mq               1760 drivers/md/dm-cache-policy-smq.c 		       total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size));
mq               1762 drivers/md/dm-cache-policy-smq.c 	mq->hotspot_hit_bits = alloc_bitset(mq->nr_hotspot_blocks);
mq               1763 drivers/md/dm-cache-policy-smq.c 	if (!mq->hotspot_hit_bits) {
mq               1767 drivers/md/dm-cache-policy-smq.c 	clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks);
mq               1770 drivers/md/dm-cache-policy-smq.c 		mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size));
mq               1771 drivers/md/dm-cache-policy-smq.c 		if (!mq->cache_hit_bits) {
mq               1775 drivers/md/dm-cache-policy-smq.c 		clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size));
mq               1777 drivers/md/dm-cache-policy-smq.c 		mq->cache_hit_bits = NULL;
mq               1779 drivers/md/dm-cache-policy-smq.c 	mq->tick = 0;
mq               1780 drivers/md/dm-cache-policy-smq.c 	spin_lock_init(&mq->lock);
mq               1782 drivers/md/dm-cache-policy-smq.c 	q_init(&mq->hotspot, &mq->es, NR_HOTSPOT_LEVELS);
mq               1783 drivers/md/dm-cache-policy-smq.c 	mq->hotspot.nr_top_levels = 8;
mq               1784 drivers/md/dm-cache-policy-smq.c 	mq->hotspot.nr_in_top_levels = min(mq->nr_hotspot_blocks / NR_HOTSPOT_LEVELS,
mq               1785 drivers/md/dm-cache-policy-smq.c 					   from_cblock(mq->cache_size) / mq->cache_blocks_per_hotspot_block);
mq               1787 drivers/md/dm-cache-policy-smq.c 	q_init(&mq->clean, &mq->es, NR_CACHE_LEVELS);
mq               1788 drivers/md/dm-cache-policy-smq.c 	q_init(&mq->dirty, &mq->es, NR_CACHE_LEVELS);
mq               1790 drivers/md/dm-cache-policy-smq.c 	stats_init(&mq->hotspot_stats, NR_HOTSPOT_LEVELS);
mq               1791 drivers/md/dm-cache-policy-smq.c 	stats_init(&mq->cache_stats, NR_CACHE_LEVELS);
mq               1793 drivers/md/dm-cache-policy-smq.c 	if (h_init(&mq->table, &mq->es, from_cblock(cache_size)))
mq               1796 drivers/md/dm-cache-policy-smq.c 	if (h_init(&mq->hotspot_table, &mq->es, mq->nr_hotspot_blocks))
mq               1799 drivers/md/dm-cache-policy-smq.c 	sentinels_init(mq);
mq               1800 drivers/md/dm-cache-policy-smq.c 	mq->write_promote_level = mq->read_promote_level = NR_HOTSPOT_LEVELS;
mq               1802 drivers/md/dm-cache-policy-smq.c 	mq->next_hotspot_period = jiffies;
mq               1803 drivers/md/dm-cache-policy-smq.c 	mq->next_cache_period = jiffies;
mq               1805 drivers/md/dm-cache-policy-smq.c 	mq->bg_work = btracker_create(4096); /* FIXME: hard coded value */
mq               1806 drivers/md/dm-cache-policy-smq.c 	if (!mq->bg_work)
mq               1809 drivers/md/dm-cache-policy-smq.c 	mq->migrations_allowed = migrations_allowed;
mq               1811 drivers/md/dm-cache-policy-smq.c 	return &mq->policy;
mq               1814 drivers/md/dm-cache-policy-smq.c 	h_exit(&mq->hotspot_table);
mq               1816 drivers/md/dm-cache-policy-smq.c 	h_exit(&mq->table);
mq               1818 drivers/md/dm-cache-policy-smq.c 	free_bitset(mq->cache_hit_bits);
mq               1820 drivers/md/dm-cache-policy-smq.c 	free_bitset(mq->hotspot_hit_bits);
mq               1822 drivers/md/dm-cache-policy-smq.c 	space_exit(&mq->es);
mq               1824 drivers/md/dm-cache-policy-smq.c 	kfree(mq);
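
The dm-cache-policy-smq hits rotate sentinel entries and reset statistics on fixed periods by comparing jiffies against stored deadlines such as mq->next_writeback_period. A standalone sketch of that deadline pattern follows, using a wrapping tick counter and the same signed-difference comparison that time_after() relies on; the names and period are illustrative only:

    #include <stdbool.h>
    #include <stdio.h>

    #define WRITEBACK_PERIOD 10u    /* ticks between sentinel swaps (illustrative) */

    /* Signed-difference comparison, the same idea time_after() is built on,
     * so the check keeps working when the tick counter wraps around. */
    static bool tick_after(unsigned long a, unsigned long b)
    {
            return (long)(a - b) > 0;
    }

    struct policy_state {
            unsigned long next_writeback_period;    /* deadline in ticks */
            bool current_sentinels;                 /* which of the two sentinel sets is live */
    };

    static void update_sentinels(struct policy_state *p, unsigned long now)
    {
            if (tick_after(now, p->next_writeback_period)) {
                    p->next_writeback_period = now + WRITEBACK_PERIOD;
                    p->current_sentinels = !p->current_sentinels;   /* swap sentinel sets */
            }
    }

    int main(void)
    {
            struct policy_state p = { .next_writeback_period = WRITEBACK_PERIOD };

            for (unsigned long tick = 0; tick < 50; tick++) {
                    bool before = p.current_sentinels;

                    update_sentinels(&p, tick);
                    if (before != p.current_sentinels)
                            printf("tick %lu: swapped sentinel set\n", tick);
            }
            return 0;
    }
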
mq                133 drivers/misc/sgi-gru/grukservices.c #define HSTATUS(mq, h)	((mq) + offsetof(struct message_queue, hstatus[h]))
mq                547 drivers/misc/sgi-gru/grukservices.c 	struct message_queue *mq = p;
mq                551 drivers/misc/sgi-gru/grukservices.c 	memset(mq, 0, bytes);
mq                552 drivers/misc/sgi-gru/grukservices.c 	mq->start = &mq->data;
mq                553 drivers/misc/sgi-gru/grukservices.c 	mq->start2 = &mq->data + (qlines / 2 - 1) * GRU_CACHE_LINE_BYTES;
mq                554 drivers/misc/sgi-gru/grukservices.c 	mq->next = &mq->data;
mq                555 drivers/misc/sgi-gru/grukservices.c 	mq->limit = &mq->data + (qlines - 2) * GRU_CACHE_LINE_BYTES;
mq                556 drivers/misc/sgi-gru/grukservices.c 	mq->qlines = qlines;
mq                557 drivers/misc/sgi-gru/grukservices.c 	mq->hstatus[0] = 0;
mq                558 drivers/misc/sgi-gru/grukservices.c 	mq->hstatus[1] = 1;
mq                559 drivers/misc/sgi-gru/grukservices.c 	mq->head = gru_mesq_head(2, qlines / 2 + 1);
mq                560 drivers/misc/sgi-gru/grukservices.c 	mqd->mq = mq;
mq                561 drivers/misc/sgi-gru/grukservices.c 	mqd->mq_gpa = uv_gpa(mq);
mq                838 drivers/misc/sgi-gru/grukservices.c 	struct message_queue *mq = mqd->mq;
mq                839 drivers/misc/sgi-gru/grukservices.c 	struct message_header *mhdr = mq->next;
mq                848 drivers/misc/sgi-gru/grukservices.c 	pnext = mq->next;
mq                850 drivers/misc/sgi-gru/grukservices.c 	if (next == mq->limit) {
mq                851 drivers/misc/sgi-gru/grukservices.c 		next = mq->start;
mq                853 drivers/misc/sgi-gru/grukservices.c 	} else if (pnext < mq->start2 && next >= mq->start2) {
mq                858 drivers/misc/sgi-gru/grukservices.c 		mq->hstatus[half] = 1;
mq                859 drivers/misc/sgi-gru/grukservices.c 	mq->next = next;
mq                870 drivers/misc/sgi-gru/grukservices.c 	struct message_queue *mq = mqd->mq;
mq                871 drivers/misc/sgi-gru/grukservices.c 	struct message_header *mhdr = mq->next;
mq                877 drivers/misc/sgi-gru/grukservices.c 		mhdr = mq->next;
mq                997 drivers/misc/sgi-gru/grukservices.c 	void *p, *mq;
mq               1005 drivers/misc/sgi-gru/grukservices.c 	mq = ALIGNUP(p, 1024);
mq               1008 drivers/misc/sgi-gru/grukservices.c 	gru_create_message_queue(&mqd, mq, 8 * GRU_CACHE_LINE_BYTES, 0, 0, 0);
mq                 32 drivers/misc/sgi-gru/grukservices.h 	void		*mq;			/* message queue vaddress */
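
The grukservices hits place the message queue at a 1 KiB-aligned address inside a larger buffer (ALIGNUP(p, 1024)). A short sketch of that align-up idiom for power-of-two boundaries is below; align_up() here is re-derived for illustration, not taken from the driver:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Round a pointer up to the next multiple of 'align' (align must be a power of two). */
    static void *align_up(void *p, uintptr_t align)
    {
            return (void *)(((uintptr_t)p + align - 1) & ~(align - 1));
    }

    int main(void)
    {
            void *raw = malloc(4096 + 1024);        /* over-allocate so alignment always fits */
            void *mq;

            if (!raw)
                    return 1;
            mq = align_up(raw, 1024);
            printf("raw=%p aligned=%p\n", raw, mq);
            free(raw);
            return 0;
    }
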
mq                112 drivers/misc/sgi-xp/xpc_uv.c xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
mq                114 drivers/misc/sgi-xp/xpc_uv.c 	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
mq                117 drivers/misc/sgi-xp/xpc_uv.c 	mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
mq                119 drivers/misc/sgi-xp/xpc_uv.c 	if (mq->irq < 0)
mq                120 drivers/misc/sgi-xp/xpc_uv.c 		return mq->irq;
mq                122 drivers/misc/sgi-xp/xpc_uv.c 	mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);
mq                126 drivers/misc/sgi-xp/xpc_uv.c 		mq->irq = SGI_XPC_ACTIVATE;
mq                128 drivers/misc/sgi-xp/xpc_uv.c 		mq->irq = SGI_XPC_NOTIFY;
mq                132 drivers/misc/sgi-xp/xpc_uv.c 	mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
mq                133 drivers/misc/sgi-xp/xpc_uv.c 	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value);
mq                142 drivers/misc/sgi-xp/xpc_uv.c xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
mq                145 drivers/misc/sgi-xp/xpc_uv.c 	uv_teardown_irq(mq->irq);
mq                151 drivers/misc/sgi-xp/xpc_uv.c 	mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
mq                154 drivers/misc/sgi-xp/xpc_uv.c 	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
mq                161 drivers/misc/sgi-xp/xpc_uv.c xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
mq                166 drivers/misc/sgi-xp/xpc_uv.c 	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
mq                168 drivers/misc/sgi-xp/xpc_uv.c 	ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
mq                169 drivers/misc/sgi-xp/xpc_uv.c 				    mq->order, &mq->mmr_offset);
mq                176 drivers/misc/sgi-xp/xpc_uv.c 	ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
mq                177 drivers/misc/sgi-xp/xpc_uv.c 					 mq->order, &mq->mmr_offset);
mq                187 drivers/misc/sgi-xp/xpc_uv.c 	mq->watchlist_num = ret;
mq                192 drivers/misc/sgi-xp/xpc_uv.c xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
mq                195 drivers/misc/sgi-xp/xpc_uv.c 	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
mq                198 drivers/misc/sgi-xp/xpc_uv.c 	ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
mq                201 drivers/misc/sgi-xp/xpc_uv.c 	ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
mq                218 drivers/misc/sgi-xp/xpc_uv.c 	struct xpc_gru_mq_uv *mq;
mq                221 drivers/misc/sgi-xp/xpc_uv.c 	mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
mq                222 drivers/misc/sgi-xp/xpc_uv.c 	if (mq == NULL) {
mq                229 drivers/misc/sgi-xp/xpc_uv.c 	mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
mq                231 drivers/misc/sgi-xp/xpc_uv.c 	if (mq->gru_mq_desc == NULL) {
mq                239 drivers/misc/sgi-xp/xpc_uv.c 	mq->order = pg_order + PAGE_SHIFT;
mq                240 drivers/misc/sgi-xp/xpc_uv.c 	mq_size = 1UL << mq->order;
mq                242 drivers/misc/sgi-xp/xpc_uv.c 	mq->mmr_blade = uv_cpu_to_blade_id(cpu);
mq                254 drivers/misc/sgi-xp/xpc_uv.c 	mq->address = page_address(page);
mq                257 drivers/misc/sgi-xp/xpc_uv.c 	ret = xpc_gru_mq_watchlist_alloc_uv(mq);
mq                261 drivers/misc/sgi-xp/xpc_uv.c 	ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
mq                265 drivers/misc/sgi-xp/xpc_uv.c 	ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
mq                268 drivers/misc/sgi-xp/xpc_uv.c 			mq->irq, -ret);
mq                274 drivers/misc/sgi-xp/xpc_uv.c 	mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
mq                275 drivers/misc/sgi-xp/xpc_uv.c 	ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
mq                285 drivers/misc/sgi-xp/xpc_uv.c 	xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
mq                291 drivers/misc/sgi-xp/xpc_uv.c 	return mq;
mq                295 drivers/misc/sgi-xp/xpc_uv.c 	free_irq(mq->irq, NULL);
mq                297 drivers/misc/sgi-xp/xpc_uv.c 	xpc_release_gru_mq_irq_uv(mq);
mq                299 drivers/misc/sgi-xp/xpc_uv.c 	xpc_gru_mq_watchlist_free_uv(mq);
mq                301 drivers/misc/sgi-xp/xpc_uv.c 	free_pages((unsigned long)mq->address, pg_order);
mq                303 drivers/misc/sgi-xp/xpc_uv.c 	kfree(mq->gru_mq_desc);
mq                305 drivers/misc/sgi-xp/xpc_uv.c 	kfree(mq);
mq                311 drivers/misc/sgi-xp/xpc_uv.c xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
mq                318 drivers/misc/sgi-xp/xpc_uv.c 	mq_size = 1UL << mq->order;
mq                319 drivers/misc/sgi-xp/xpc_uv.c 	ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
mq                323 drivers/misc/sgi-xp/xpc_uv.c 	free_irq(mq->irq, NULL);
mq                324 drivers/misc/sgi-xp/xpc_uv.c 	xpc_release_gru_mq_irq_uv(mq);
mq                327 drivers/misc/sgi-xp/xpc_uv.c 	xpc_gru_mq_watchlist_free_uv(mq);
mq                329 drivers/misc/sgi-xp/xpc_uv.c 	pg_order = mq->order - PAGE_SHIFT;
mq                330 drivers/misc/sgi-xp/xpc_uv.c 	free_pages((unsigned long)mq->address, pg_order);
mq                332 drivers/misc/sgi-xp/xpc_uv.c 	kfree(mq);
mq                670 drivers/misc/sgi-xp/xpc_uv.c 		gru_mq_desc->mq = NULL;
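
The xpc_uv hits program the MMR with the target CPU's physical id in the upper 32 bits and the interrupt vector in the low bits (cpu_physical_id(cpu) << 32 | mq->irq). A small sketch of packing and unpacking such a 64-bit value, with hypothetical field names, follows:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack a destination id into bits 63..32 and a vector into bits 31..0. */
    static uint64_t pack_mmr(uint32_t dest_id, uint32_t vector)
    {
            return ((uint64_t)dest_id << 32) | vector;
    }

    static uint32_t mmr_dest(uint64_t v)   { return (uint32_t)(v >> 32); }
    static uint32_t mmr_vector(uint64_t v) { return (uint32_t)v; }

    int main(void)
    {
            uint64_t mmr = pack_mmr(0x2a, 0xb0);

            printf("mmr=%#llx dest=%#x vector=%#x\n",
                   (unsigned long long)mmr,
                   (unsigned)mmr_dest(mmr), (unsigned)mmr_vector(mmr));
            return 0;
    }
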
mq                232 drivers/mmc/core/block.c 	struct mmc_queue *mq;
mq                243 drivers/mmc/core/block.c 	mq = &md->queue;
mq                246 drivers/mmc/core/block.c 	req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, 0);
mq                252 drivers/mmc/core/block.c 	blk_execute_rq(mq->queue, NULL, req, 0);
mq                650 drivers/mmc/core/block.c 	struct mmc_queue *mq;
mq                670 drivers/mmc/core/block.c 	mq = &md->queue;
mq                671 drivers/mmc/core/block.c 	req = blk_get_request(mq->queue,
mq                682 drivers/mmc/core/block.c 	blk_execute_rq(mq->queue, NULL, req, 0);
mq                700 drivers/mmc/core/block.c 	struct mmc_queue *mq;
mq                740 drivers/mmc/core/block.c 	mq = &md->queue;
mq                741 drivers/mmc/core/block.c 	req = blk_get_request(mq->queue,
mq                751 drivers/mmc/core/block.c 	blk_execute_rq(mq->queue, NULL, req, 0);
mq               1027 drivers/mmc/core/block.c static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
mq               1030 drivers/mmc/core/block.c 	struct mmc_card *card = mq->card;
mq               1031 drivers/mmc/core/block.c 	struct mmc_blk_data *md = mq->blkdata;
mq               1086 drivers/mmc/core/block.c static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
mq               1088 drivers/mmc/core/block.c 	struct mmc_blk_data *md = mq->blkdata;
mq               1123 drivers/mmc/core/block.c static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
mq               1126 drivers/mmc/core/block.c 	struct mmc_blk_data *md = mq->blkdata;
mq               1193 drivers/mmc/core/block.c static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
mq               1195 drivers/mmc/core/block.c 	struct mmc_blk_data *md = mq->blkdata;
mq               1280 drivers/mmc/core/block.c static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
mq               1284 drivers/mmc/core/block.c 	struct mmc_blk_data *md = mq->blkdata;
mq               1384 drivers/mmc/core/block.c 	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
mq               1414 drivers/mmc/core/block.c static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
mq               1419 drivers/mmc/core/block.c 	struct mmc_host *host = mq->card->host;
mq               1420 drivers/mmc/core/block.c 	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
mq               1448 drivers/mmc/core/block.c 	spin_lock_irqsave(&mq->lock, flags);
mq               1450 drivers/mmc/core/block.c 	mq->in_flight[issue_type] -= 1;
mq               1452 drivers/mmc/core/block.c 	put_card = (mmc_tot_in_flight(mq) == 0);
mq               1454 drivers/mmc/core/block.c 	mmc_cqe_check_busy(mq);
mq               1456 drivers/mmc/core/block.c 	spin_unlock_irqrestore(&mq->lock, flags);
mq               1458 drivers/mmc/core/block.c 	if (!mq->cqe_busy)
mq               1462 drivers/mmc/core/block.c 		mmc_put_card(mq->card, &mq->ctx);
mq               1465 drivers/mmc/core/block.c void mmc_blk_cqe_recovery(struct mmc_queue *mq)
mq               1467 drivers/mmc/core/block.c 	struct mmc_card *card = mq->card;
mq               1475 drivers/mmc/core/block.c 		mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
mq               1477 drivers/mmc/core/block.c 		mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
mq               1488 drivers/mmc/core/block.c 	struct mmc_queue *mq = q->queuedata;
mq               1494 drivers/mmc/core/block.c 	if (mq->in_recovery)
mq               1495 drivers/mmc/core/block.c 		mmc_blk_cqe_complete_rq(mq, req);
mq               1521 drivers/mmc/core/block.c static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req)
mq               1533 drivers/mmc/core/block.c 	return mmc_blk_cqe_start_req(mq->card->host, mrq);
mq               1536 drivers/mmc/core/block.c static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
mq               1540 drivers/mmc/core/block.c 	mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL);
mq               1542 drivers/mmc/core/block.c 	return mmc_blk_cqe_start_req(mq->card->host, &mqrq->brq.mrq);
mq               1548 drivers/mmc/core/block.c 			       struct mmc_queue *mq)
mq               1553 drivers/mmc/core/block.c 	struct mmc_blk_data *md = mq->blkdata;
mq               1556 drivers/mmc/core/block.c 	mmc_blk_data_prep(mq, mqrq, disable_multi, &do_rel_wr, &do_data_tag);
mq               1648 drivers/mmc/core/block.c static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
mq               1652 drivers/mmc/core/block.c 	struct mmc_card *card = mq->card;
mq               1661 drivers/mmc/core/block.c 		mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
mq               1719 drivers/mmc/core/block.c 	struct mmc_queue *mq = req->q->queuedata;
mq               1722 drivers/mmc/core/block.c 	if (mmc_host_is_spi(mq->card->host))
mq               1755 drivers/mmc/core/block.c static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
mq               1760 drivers/mmc/core/block.c 	struct mmc_blk_data *md = mq->blkdata;
mq               1761 drivers/mmc/core/block.c 	struct mmc_card *card = mq->card;
mq               1791 drivers/mmc/core/block.c 	if (!mmc_host_is_spi(mq->card->host) &&
mq               1793 drivers/mmc/core/block.c 		err = mmc_blk_fix_state(mq->card, req);
mq               1808 drivers/mmc/core/block.c 	if (!mmc_host_is_spi(mq->card->host) &&
mq               1840 drivers/mmc/core/block.c 		mmc_blk_read_single(mq, req);
mq               1880 drivers/mmc/core/block.c static inline void mmc_blk_rw_reset_success(struct mmc_queue *mq,
mq               1885 drivers/mmc/core/block.c 	mmc_blk_reset_success(mq->blkdata, type);
mq               1888 drivers/mmc/core/block.c static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req)
mq               1903 drivers/mmc/core/block.c 		if (mmc_card_removed(mq->card))
mq               1909 drivers/mmc/core/block.c static bool mmc_blk_urgent_bkops_needed(struct mmc_queue *mq,
mq               1912 drivers/mmc/core/block.c 	return mmc_card_mmc(mq->card) && !mmc_host_is_spi(mq->card->host) &&
mq               1917 drivers/mmc/core/block.c static void mmc_blk_urgent_bkops(struct mmc_queue *mq,
mq               1920 drivers/mmc/core/block.c 	if (mmc_blk_urgent_bkops_needed(mq, mqrq))
mq               1921 drivers/mmc/core/block.c 		mmc_run_bkops(mq->card);
mq               1926 drivers/mmc/core/block.c 	struct mmc_queue *mq = req->q->queuedata;
mq               1928 drivers/mmc/core/block.c 	if (mq->use_cqe)
mq               1929 drivers/mmc/core/block.c 		mmc_blk_cqe_complete_rq(mq, req);
mq               1931 drivers/mmc/core/block.c 		mmc_blk_mq_complete_rq(mq, req);
mq               1934 drivers/mmc/core/block.c static void mmc_blk_mq_poll_completion(struct mmc_queue *mq,
mq               1938 drivers/mmc/core/block.c 	struct mmc_host *host = mq->card->host;
mq               1941 drivers/mmc/core/block.c 	    mmc_blk_card_busy(mq->card, req)) {
mq               1942 drivers/mmc/core/block.c 		mmc_blk_mq_rw_recovery(mq, req);
mq               1944 drivers/mmc/core/block.c 		mmc_blk_rw_reset_success(mq, req);
mq               1948 drivers/mmc/core/block.c 	mmc_blk_urgent_bkops(mq, mqrq);
mq               1951 drivers/mmc/core/block.c static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
mq               1956 drivers/mmc/core/block.c 	spin_lock_irqsave(&mq->lock, flags);
mq               1958 drivers/mmc/core/block.c 	mq->in_flight[mmc_issue_type(mq, req)] -= 1;
mq               1960 drivers/mmc/core/block.c 	put_card = (mmc_tot_in_flight(mq) == 0);
mq               1962 drivers/mmc/core/block.c 	spin_unlock_irqrestore(&mq->lock, flags);
mq               1965 drivers/mmc/core/block.c 		mmc_put_card(mq->card, &mq->ctx);
mq               1968 drivers/mmc/core/block.c static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
mq               1972 drivers/mmc/core/block.c 	struct mmc_host *host = mq->card->host;
mq               1980 drivers/mmc/core/block.c 	if (mq->in_recovery)
mq               1981 drivers/mmc/core/block.c 		mmc_blk_mq_complete_rq(mq, req);
mq               1985 drivers/mmc/core/block.c 	mmc_blk_mq_dec_in_flight(mq, req);
mq               1988 drivers/mmc/core/block.c void mmc_blk_mq_recovery(struct mmc_queue *mq)
mq               1990 drivers/mmc/core/block.c 	struct request *req = mq->recovery_req;
mq               1991 drivers/mmc/core/block.c 	struct mmc_host *host = mq->card->host;
mq               1994 drivers/mmc/core/block.c 	mq->recovery_req = NULL;
mq               1995 drivers/mmc/core/block.c 	mq->rw_wait = false;
mq               1999 drivers/mmc/core/block.c 		mmc_blk_mq_rw_recovery(mq, req);
mq               2002 drivers/mmc/core/block.c 	mmc_blk_urgent_bkops(mq, mqrq);
mq               2004 drivers/mmc/core/block.c 	mmc_blk_mq_post_req(mq, req);
mq               2007 drivers/mmc/core/block.c static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq,
mq               2010 drivers/mmc/core/block.c 	if (mmc_host_done_complete(mq->card->host))
mq               2013 drivers/mmc/core/block.c 	mutex_lock(&mq->complete_lock);
mq               2015 drivers/mmc/core/block.c 	if (!mq->complete_req)
mq               2018 drivers/mmc/core/block.c 	mmc_blk_mq_poll_completion(mq, mq->complete_req);
mq               2021 drivers/mmc/core/block.c 		*prev_req = mq->complete_req;
mq               2023 drivers/mmc/core/block.c 		mmc_blk_mq_post_req(mq, mq->complete_req);
mq               2025 drivers/mmc/core/block.c 	mq->complete_req = NULL;
mq               2028 drivers/mmc/core/block.c 	mutex_unlock(&mq->complete_lock);
mq               2033 drivers/mmc/core/block.c 	struct mmc_queue *mq = container_of(work, struct mmc_queue,
mq               2036 drivers/mmc/core/block.c 	mmc_blk_mq_complete_prev_req(mq, NULL);
mq               2045 drivers/mmc/core/block.c 	struct mmc_queue *mq = q->queuedata;
mq               2046 drivers/mmc/core/block.c 	struct mmc_host *host = mq->card->host;
mq               2058 drivers/mmc/core/block.c 		spin_lock_irqsave(&mq->lock, flags);
mq               2059 drivers/mmc/core/block.c 		mq->complete_req = req;
mq               2060 drivers/mmc/core/block.c 		mq->rw_wait = false;
mq               2061 drivers/mmc/core/block.c 		waiting = mq->waiting;
mq               2062 drivers/mmc/core/block.c 		spin_unlock_irqrestore(&mq->lock, flags);
mq               2071 drivers/mmc/core/block.c 			wake_up(&mq->wait);
mq               2073 drivers/mmc/core/block.c 			queue_work(mq->card->complete_wq, &mq->complete_work);
mq               2080 drivers/mmc/core/block.c 	    mmc_blk_urgent_bkops_needed(mq, mqrq)) {
mq               2081 drivers/mmc/core/block.c 		spin_lock_irqsave(&mq->lock, flags);
mq               2082 drivers/mmc/core/block.c 		mq->recovery_needed = true;
mq               2083 drivers/mmc/core/block.c 		mq->recovery_req = req;
mq               2084 drivers/mmc/core/block.c 		spin_unlock_irqrestore(&mq->lock, flags);
mq               2085 drivers/mmc/core/block.c 		wake_up(&mq->wait);
mq               2086 drivers/mmc/core/block.c 		schedule_work(&mq->recovery_work);
mq               2090 drivers/mmc/core/block.c 	mmc_blk_rw_reset_success(mq, req);
mq               2092 drivers/mmc/core/block.c 	mq->rw_wait = false;
mq               2093 drivers/mmc/core/block.c 	wake_up(&mq->wait);
mq               2095 drivers/mmc/core/block.c 	mmc_blk_mq_post_req(mq, req);
mq               2098 drivers/mmc/core/block.c static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
mq               2107 drivers/mmc/core/block.c 	spin_lock_irqsave(&mq->lock, flags);
mq               2108 drivers/mmc/core/block.c 	if (mq->recovery_needed) {
mq               2112 drivers/mmc/core/block.c 		done = !mq->rw_wait;
mq               2114 drivers/mmc/core/block.c 	mq->waiting = !done;
mq               2115 drivers/mmc/core/block.c 	spin_unlock_irqrestore(&mq->lock, flags);
mq               2120 drivers/mmc/core/block.c static int mmc_blk_rw_wait(struct mmc_queue *mq, struct request **prev_req)
mq               2124 drivers/mmc/core/block.c 	wait_event(mq->wait, mmc_blk_rw_wait_cond(mq, &err));
mq               2127 drivers/mmc/core/block.c 	mmc_blk_mq_complete_prev_req(mq, prev_req);
mq               2132 drivers/mmc/core/block.c static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq,
mq               2136 drivers/mmc/core/block.c 	struct mmc_host *host = mq->card->host;
mq               2140 drivers/mmc/core/block.c 	mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
mq               2146 drivers/mmc/core/block.c 	err = mmc_blk_rw_wait(mq, &prev_req);
mq               2150 drivers/mmc/core/block.c 	mq->rw_wait = true;
mq               2155 drivers/mmc/core/block.c 		mmc_blk_mq_post_req(mq, prev_req);
mq               2158 drivers/mmc/core/block.c 		mq->rw_wait = false;
mq               2171 drivers/mmc/core/block.c static int mmc_blk_wait_for_idle(struct mmc_queue *mq, struct mmc_host *host)
mq               2173 drivers/mmc/core/block.c 	if (mq->use_cqe)
mq               2176 drivers/mmc/core/block.c 	return mmc_blk_rw_wait(mq, NULL);
mq               2179 drivers/mmc/core/block.c enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
mq               2181 drivers/mmc/core/block.c 	struct mmc_blk_data *md = mq->blkdata;
mq               2190 drivers/mmc/core/block.c 	switch (mmc_issue_type(mq, req)) {
mq               2192 drivers/mmc/core/block.c 		ret = mmc_blk_wait_for_idle(mq, host);
mq               2198 drivers/mmc/core/block.c 			mmc_blk_issue_drv_op(mq, req);
mq               2201 drivers/mmc/core/block.c 			mmc_blk_issue_discard_rq(mq, req);
mq               2204 drivers/mmc/core/block.c 			mmc_blk_issue_secdiscard_rq(mq, req);
mq               2207 drivers/mmc/core/block.c 			mmc_blk_issue_flush(mq, req);
mq               2218 drivers/mmc/core/block.c 			ret = mmc_blk_cqe_issue_flush(mq, req);
mq               2222 drivers/mmc/core/block.c 			if (mq->use_cqe)
mq               2223 drivers/mmc/core/block.c 				ret = mmc_blk_cqe_issue_rw_rq(mq, req);
mq               2225 drivers/mmc/core/block.c 				ret = mmc_blk_mq_issue_rw_rq(mq, req);
mq               2710 drivers/mmc/core/block.c 	struct mmc_queue *mq = &md->queue;
mq               2715 drivers/mmc/core/block.c 	req = blk_get_request(mq->queue, REQ_OP_DRV_IN, 0);
mq               2719 drivers/mmc/core/block.c 	blk_execute_rq(mq->queue, NULL, req, 0);
mq               2739 drivers/mmc/core/block.c 	struct mmc_queue *mq = &md->queue;
mq               2751 drivers/mmc/core/block.c 	req = blk_get_request(mq->queue, REQ_OP_DRV_IN, 0);
mq               2758 drivers/mmc/core/block.c 	blk_execute_rq(mq->queue, NULL, req, 0);
mq                  8 drivers/mmc/core/block.h void mmc_blk_cqe_recovery(struct mmc_queue *mq);
mq                 12 drivers/mmc/core/block.h enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req);
mq                 14 drivers/mmc/core/block.h void mmc_blk_mq_recovery(struct mmc_queue *mq);
mq                 26 drivers/mmc/core/queue.c static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
mq                 29 drivers/mmc/core/queue.c 	return mq->in_flight[MMC_ISSUE_DCMD];
mq                 32 drivers/mmc/core/queue.c void mmc_cqe_check_busy(struct mmc_queue *mq)
mq                 34 drivers/mmc/core/queue.c 	if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
mq                 35 drivers/mmc/core/queue.c 		mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;
mq                 37 drivers/mmc/core/queue.c 	mq->cqe_busy &= ~MMC_CQE_QUEUE_FULL;
mq                 61 drivers/mmc/core/queue.c enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
mq                 63 drivers/mmc/core/queue.c 	struct mmc_host *host = mq->card->host;
mq                 65 drivers/mmc/core/queue.c 	if (mq->use_cqe)
mq                 74 drivers/mmc/core/queue.c static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
mq                 76 drivers/mmc/core/queue.c 	if (!mq->recovery_needed) {
mq                 77 drivers/mmc/core/queue.c 		mq->recovery_needed = true;
mq                 78 drivers/mmc/core/queue.c 		schedule_work(&mq->recovery_work);
mq                 88 drivers/mmc/core/queue.c 	struct mmc_queue *mq = q->queuedata;
mq                 91 drivers/mmc/core/queue.c 	spin_lock_irqsave(&mq->lock, flags);
mq                 92 drivers/mmc/core/queue.c 	__mmc_cqe_recovery_notifier(mq);
mq                 93 drivers/mmc/core/queue.c 	spin_unlock_irqrestore(&mq->lock, flags);
mq                100 drivers/mmc/core/queue.c 	struct mmc_queue *mq = req->q->queuedata;
mq                101 drivers/mmc/core/queue.c 	struct mmc_host *host = mq->card->host;
mq                102 drivers/mmc/core/queue.c 	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
mq                125 drivers/mmc/core/queue.c 	struct mmc_queue *mq = q->queuedata;
mq                129 drivers/mmc/core/queue.c 	spin_lock_irqsave(&mq->lock, flags);
mq                130 drivers/mmc/core/queue.c 	ignore_tout = mq->recovery_needed || !mq->use_cqe;
mq                131 drivers/mmc/core/queue.c 	spin_unlock_irqrestore(&mq->lock, flags);
mq                138 drivers/mmc/core/queue.c 	struct mmc_queue *mq = container_of(work, struct mmc_queue,
mq                140 drivers/mmc/core/queue.c 	struct request_queue *q = mq->queue;
mq                142 drivers/mmc/core/queue.c 	mmc_get_card(mq->card, &mq->ctx);
mq                144 drivers/mmc/core/queue.c 	mq->in_recovery = true;
mq                146 drivers/mmc/core/queue.c 	if (mq->use_cqe)
mq                147 drivers/mmc/core/queue.c 		mmc_blk_cqe_recovery(mq);
mq                149 drivers/mmc/core/queue.c 		mmc_blk_mq_recovery(mq);
mq                151 drivers/mmc/core/queue.c 	mq->in_recovery = false;
mq                153 drivers/mmc/core/queue.c 	spin_lock_irq(&mq->lock);
mq                154 drivers/mmc/core/queue.c 	mq->recovery_needed = false;
mq                155 drivers/mmc/core/queue.c 	spin_unlock_irq(&mq->lock);
mq                157 drivers/mmc/core/queue.c 	mmc_put_card(mq->card, &mq->ctx);
mq                204 drivers/mmc/core/queue.c static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
mq                208 drivers/mmc/core/queue.c 	struct mmc_card *card = mq->card;
mq                235 drivers/mmc/core/queue.c 	struct mmc_queue *mq = set->driver_data;
mq                237 drivers/mmc/core/queue.c 	mmc_exit_request(mq->queue, req);
mq                245 drivers/mmc/core/queue.c 	struct mmc_queue *mq = q->queuedata;
mq                246 drivers/mmc/core/queue.c 	struct mmc_card *card = mq->card;
mq                253 drivers/mmc/core/queue.c 	if (mmc_card_removed(mq->card)) {
mq                258 drivers/mmc/core/queue.c 	issue_type = mmc_issue_type(mq, req);
mq                260 drivers/mmc/core/queue.c 	spin_lock_irq(&mq->lock);
mq                262 drivers/mmc/core/queue.c 	if (mq->recovery_needed || mq->busy) {
mq                263 drivers/mmc/core/queue.c 		spin_unlock_irq(&mq->lock);
mq                269 drivers/mmc/core/queue.c 		if (mmc_cqe_dcmd_busy(mq)) {
mq                270 drivers/mmc/core/queue.c 			mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
mq                271 drivers/mmc/core/queue.c 			spin_unlock_irq(&mq->lock);
mq                290 drivers/mmc/core/queue.c 	mq->busy = true;
mq                292 drivers/mmc/core/queue.c 	mq->in_flight[issue_type] += 1;
mq                293 drivers/mmc/core/queue.c 	get_card = (mmc_tot_in_flight(mq) == 1);
mq                294 drivers/mmc/core/queue.c 	cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);
mq                296 drivers/mmc/core/queue.c 	spin_unlock_irq(&mq->lock);
mq                304 drivers/mmc/core/queue.c 		mmc_get_card(card, &mq->ctx);
mq                306 drivers/mmc/core/queue.c 	if (mq->use_cqe) {
mq                313 drivers/mmc/core/queue.c 	issued = mmc_blk_mq_issue_rq(mq, req);
mq                330 drivers/mmc/core/queue.c 		spin_lock_irq(&mq->lock);
mq                331 drivers/mmc/core/queue.c 		mq->in_flight[issue_type] -= 1;
mq                332 drivers/mmc/core/queue.c 		if (mmc_tot_in_flight(mq) == 0)
mq                334 drivers/mmc/core/queue.c 		mq->busy = false;
mq                335 drivers/mmc/core/queue.c 		spin_unlock_irq(&mq->lock);
mq                337 drivers/mmc/core/queue.c 			mmc_put_card(card, &mq->ctx);
mq                339 drivers/mmc/core/queue.c 		WRITE_ONCE(mq->busy, false);
mq                353 drivers/mmc/core/queue.c static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
mq                358 drivers/mmc/core/queue.c 	blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
mq                359 drivers/mmc/core/queue.c 	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
mq                361 drivers/mmc/core/queue.c 		mmc_queue_setup_discard(mq->queue, card);
mq                364 drivers/mmc/core/queue.c 		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
mq                365 drivers/mmc/core/queue.c 	blk_queue_max_hw_sectors(mq->queue,
mq                368 drivers/mmc/core/queue.c 		WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
mq                371 drivers/mmc/core/queue.c 	blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));
mq                376 drivers/mmc/core/queue.c 	blk_queue_logical_block_size(mq->queue, block_size);
mq                383 drivers/mmc/core/queue.c 		blk_queue_max_segment_size(mq->queue,
mq                386 drivers/mmc/core/queue.c 	dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));
mq                388 drivers/mmc/core/queue.c 	INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
mq                389 drivers/mmc/core/queue.c 	INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
mq                391 drivers/mmc/core/queue.c 	mutex_init(&mq->complete_lock);
mq                393 drivers/mmc/core/queue.c 	init_waitqueue_head(&mq->wait);
mq                411 drivers/mmc/core/queue.c int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
mq                416 drivers/mmc/core/queue.c 	mq->card = card;
mq                417 drivers/mmc/core/queue.c 	mq->use_cqe = host->cqe_enabled;
mq                419 drivers/mmc/core/queue.c 	spin_lock_init(&mq->lock);
mq                421 drivers/mmc/core/queue.c 	memset(&mq->tag_set, 0, sizeof(mq->tag_set));
mq                422 drivers/mmc/core/queue.c 	mq->tag_set.ops = &mmc_mq_ops;
mq                427 drivers/mmc/core/queue.c 	if (mq->use_cqe)
mq                428 drivers/mmc/core/queue.c 		mq->tag_set.queue_depth =
mq                431 drivers/mmc/core/queue.c 		mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
mq                432 drivers/mmc/core/queue.c 	mq->tag_set.numa_node = NUMA_NO_NODE;
mq                433 drivers/mmc/core/queue.c 	mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
mq                434 drivers/mmc/core/queue.c 	mq->tag_set.nr_hw_queues = 1;
mq                435 drivers/mmc/core/queue.c 	mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
mq                436 drivers/mmc/core/queue.c 	mq->tag_set.driver_data = mq;
mq                450 drivers/mmc/core/queue.c 	ret = blk_mq_alloc_tag_set(&mq->tag_set);
mq                454 drivers/mmc/core/queue.c 	mq->queue = blk_mq_init_queue(&mq->tag_set);
mq                455 drivers/mmc/core/queue.c 	if (IS_ERR(mq->queue)) {
mq                456 drivers/mmc/core/queue.c 		ret = PTR_ERR(mq->queue);
mq                461 drivers/mmc/core/queue.c 		mq->queue->backing_dev_info->capabilities |=
mq                464 drivers/mmc/core/queue.c 	mq->queue->queuedata = mq;
mq                465 drivers/mmc/core/queue.c 	blk_queue_rq_timeout(mq->queue, 60 * HZ);
mq                467 drivers/mmc/core/queue.c 	mmc_setup_queue(mq, card);
mq                471 drivers/mmc/core/queue.c 	blk_mq_free_tag_set(&mq->tag_set);
mq                475 drivers/mmc/core/queue.c void mmc_queue_suspend(struct mmc_queue *mq)
mq                477 drivers/mmc/core/queue.c 	blk_mq_quiesce_queue(mq->queue);
mq                483 drivers/mmc/core/queue.c 	mmc_claim_host(mq->card->host);
mq                484 drivers/mmc/core/queue.c 	mmc_release_host(mq->card->host);
mq                487 drivers/mmc/core/queue.c void mmc_queue_resume(struct mmc_queue *mq)
mq                489 drivers/mmc/core/queue.c 	blk_mq_unquiesce_queue(mq->queue);
mq                492 drivers/mmc/core/queue.c void mmc_cleanup_queue(struct mmc_queue *mq)
mq                494 drivers/mmc/core/queue.c 	struct request_queue *q = mq->queue;
mq                504 drivers/mmc/core/queue.c 	blk_mq_free_tag_set(&mq->tag_set);
mq                511 drivers/mmc/core/queue.c 	flush_work(&mq->complete_work);
mq                513 drivers/mmc/core/queue.c 	mq->card = NULL;
mq                519 drivers/mmc/core/queue.c unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
mq                523 drivers/mmc/core/queue.c 	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
mq                106 drivers/mmc/core/queue.h void mmc_cqe_check_busy(struct mmc_queue *mq);
mq                109 drivers/mmc/core/queue.h enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req);
mq                111 drivers/mmc/core/queue.h static inline int mmc_tot_in_flight(struct mmc_queue *mq)
mq                113 drivers/mmc/core/queue.h 	return mq->in_flight[MMC_ISSUE_SYNC] +
mq                114 drivers/mmc/core/queue.h 	       mq->in_flight[MMC_ISSUE_DCMD] +
mq                115 drivers/mmc/core/queue.h 	       mq->in_flight[MMC_ISSUE_ASYNC];
mq                118 drivers/mmc/core/queue.h static inline int mmc_cqe_qcnt(struct mmc_queue *mq)
mq                120 drivers/mmc/core/queue.h 	return mq->in_flight[MMC_ISSUE_DCMD] +
mq                121 drivers/mmc/core/queue.h 	       mq->in_flight[MMC_ISSUE_ASYNC];
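The drivers/mmc/core entries above all revolve around the same bookkeeping: requests are counted per issue type under mq->lock, and the mmc_tot_in_flight() sum decides when the card is claimed (first request issued) or released (last request completed). The following is a standalone user-space sketch of that pattern only; struct toy_queue, the pthread mutex and the helper names are stand-ins, not the kernel's types or API.

/*
 * Standalone user-space sketch (not kernel code) of the in-flight
 * accounting pattern referenced above: per-issue-type counters are
 * updated under a lock and their sum decides when the card would be
 * claimed (first request) or released (last completion).
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum issue_type { ISSUE_SYNC, ISSUE_DCMD, ISSUE_ASYNC, ISSUE_MAX };

struct toy_queue {
	pthread_mutex_t lock;
	int in_flight[ISSUE_MAX];
};

/* Mirrors the mmc_tot_in_flight() sum shown in queue.h above. */
static int tot_in_flight(struct toy_queue *q)
{
	return q->in_flight[ISSUE_SYNC] +
	       q->in_flight[ISSUE_DCMD] +
	       q->in_flight[ISSUE_ASYNC];
}

/* Returns true when the caller should "claim the card" (first in flight). */
static bool start_request(struct toy_queue *q, enum issue_type type)
{
	bool get_card;

	pthread_mutex_lock(&q->lock);
	q->in_flight[type] += 1;
	get_card = (tot_in_flight(q) == 1);
	pthread_mutex_unlock(&q->lock);
	return get_card;
}

/* Returns true when the caller should "release the card" (none left). */
static bool finish_request(struct toy_queue *q, enum issue_type type)
{
	bool put_card;

	pthread_mutex_lock(&q->lock);
	q->in_flight[type] -= 1;
	put_card = (tot_in_flight(q) == 0);
	pthread_mutex_unlock(&q->lock);
	return put_card;
}

int main(void)
{
	struct toy_queue q = { .lock = PTHREAD_MUTEX_INITIALIZER };

	printf("claim on first issue: %d\n", start_request(&q, ISSUE_ASYNC));
	printf("release on last completion: %d\n",
	       finish_request(&q, ISSUE_ASYNC));
	return 0;
}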
mq                 89 drivers/mtd/maps/vmu-flash.c static void vmu_blockread(struct mapleq *mq)
mq                 94 drivers/mtd/maps/vmu-flash.c 	mdev = mq->dev;
mq                101 drivers/mtd/maps/vmu-flash.c 	memcpy(card->blockread, mq->recvbuf->buf + 12,
mq                191 drivers/mtd/maps/vmu-flash.c 			list_del_init(&(mdev->mq->list));
mq                192 drivers/mtd/maps/vmu-flash.c 			kfree(mdev->mq->sendbuf);
mq                193 drivers/mtd/maps/vmu-flash.c 			mdev->mq->sendbuf = NULL;
mq                283 drivers/mtd/maps/vmu-flash.c 			kfree(mdev->mq->sendbuf);
mq                284 drivers/mtd/maps/vmu-flash.c 			mdev->mq->sendbuf = NULL;
mq                285 drivers/mtd/maps/vmu-flash.c 			list_del_init(&(mdev->mq->list));
mq                499 drivers/mtd/maps/vmu-flash.c static void vmu_queryblocks(struct mapleq *mq)
mq                511 drivers/mtd/maps/vmu-flash.c 	mdev = mq->dev;
mq                513 drivers/mtd/maps/vmu-flash.c 	res = (unsigned short *) (mq->recvbuf->buf);
mq                170 drivers/net/ethernet/netronome/nfp/abm/main.h 		} mq;
mq                304 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 		struct nfp_qdisc *mq = nfp_abm_qdisc_tree_deref_slot(slot);
mq                307 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 		if (mq->type != NFP_QDISC_MQ || mq->netdev != netdev)
mq                309 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 		for (i = 0; i < mq->num_children; i++)
mq                310 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 			if (mq->children[i] == qdisc) {
mq                311 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 				mq->children[i] = NULL;
mq                791 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 	memset(&qdisc->mq.stats, 0, sizeof(qdisc->mq.stats));
mq                792 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 	memset(&qdisc->mq.prev_stats, 0, sizeof(qdisc->mq.prev_stats));
mq                803 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 			nfp_abm_stats_propagate(&qdisc->mq.stats,
mq                805 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 			nfp_abm_stats_propagate(&qdisc->mq.prev_stats,
mq                810 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 	nfp_abm_stats_calculate(&qdisc->mq.stats, &qdisc->mq.prev_stats,
mq                169 drivers/net/virtio_net.c 	struct virtio_net_ctrl_mq mq;
mq               1773 drivers/net/virtio_net.c 	vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
mq               1774 drivers/net/virtio_net.c 	sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
mq               2053 drivers/net/wireless/intel/iwlwifi/dvm/main.c 	int mq = priv->queue_to_mac80211[queue];
mq               2055 drivers/net/wireless/intel/iwlwifi/dvm/main.c 	if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
mq               2058 drivers/net/wireless/intel/iwlwifi/dvm/main.c 	if (atomic_inc_return(&priv->queue_stop_count[mq]) > 1) {
mq               2061 drivers/net/wireless/intel/iwlwifi/dvm/main.c 			queue, mq);
mq               2065 drivers/net/wireless/intel/iwlwifi/dvm/main.c 	set_bit(mq, &priv->transport_queue_stop);
mq               2066 drivers/net/wireless/intel/iwlwifi/dvm/main.c 	ieee80211_stop_queue(priv->hw, mq);
mq               2072 drivers/net/wireless/intel/iwlwifi/dvm/main.c 	int mq = priv->queue_to_mac80211[queue];
mq               2074 drivers/net/wireless/intel/iwlwifi/dvm/main.c 	if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
mq               2077 drivers/net/wireless/intel/iwlwifi/dvm/main.c 	if (atomic_dec_return(&priv->queue_stop_count[mq]) > 0) {
mq               2080 drivers/net/wireless/intel/iwlwifi/dvm/main.c 			queue, mq);
mq               2084 drivers/net/wireless/intel/iwlwifi/dvm/main.c 	clear_bit(mq, &priv->transport_queue_stop);
mq               2087 drivers/net/wireless/intel/iwlwifi/dvm/main.c 		ieee80211_wake_queue(priv->hw, mq);
mq               2092 drivers/net/wireless/intel/iwlwifi/dvm/main.c 	int mq;
mq               2097 drivers/net/wireless/intel/iwlwifi/dvm/main.c 	for (mq = 0; mq < IWLAGN_FIRST_AMPDU_QUEUE; mq++) {
mq               2098 drivers/net/wireless/intel/iwlwifi/dvm/main.c 		if (!test_bit(mq, &priv->transport_queue_stop)) {
mq               2099 drivers/net/wireless/intel/iwlwifi/dvm/main.c 			IWL_DEBUG_TX_QUEUES(priv, "Wake queue %d\n", mq);
mq               2100 drivers/net/wireless/intel/iwlwifi/dvm/main.c 			ieee80211_wake_queue(priv->hw, mq);
mq               2102 drivers/net/wireless/intel/iwlwifi/dvm/main.c 			IWL_DEBUG_TX_QUEUES(priv, "Don't wake queue %d\n", mq);
mq                465 drivers/net/wireless/intel/iwlwifi/dvm/tx.c static int iwlagn_alloc_agg_txq(struct iwl_priv *priv, int mq)
mq                472 drivers/net/wireless/intel/iwlwifi/dvm/tx.c 			priv->queue_to_mac80211[q] = mq;
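The iwlwifi dvm/main.c entries above implement a per-queue stop refcount: the stop path only calls ieee80211_stop_queue() when atomic_inc_return() brings the count to 1, and the wake path only calls ieee80211_wake_queue() when atomic_dec_return() drops it back to 0. Below is a standalone C11-atomics sketch of that idea; queue_stop(), queue_wake() and NUM_QUEUES are invented stand-ins, not driver symbols.

/*
 * Standalone sketch (C11 atomics, not the iwlwifi driver) of the
 * per-queue stop refcount referenced above: only the transition
 * 0 -> 1 actually stops the queue and only 1 -> 0 actually wakes it.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_QUEUES 4			/* illustrative size */

static atomic_int stop_count[NUM_QUEUES];

/* True when this call is the one that really stops the queue. */
static bool queue_stop(int q)
{
	/* atomic_fetch_add() returns the old value, so +1 is the new one. */
	return atomic_fetch_add(&stop_count[q], 1) + 1 == 1;
}

/* True when this call is the one that really wakes the queue. */
static bool queue_wake(int q)
{
	return atomic_fetch_sub(&stop_count[q], 1) - 1 == 0;
}

int main(void)
{
	printf("stop #1 acts: %d\n", queue_stop(0));	/* prints 1 */
	printf("stop #2 acts: %d\n", queue_stop(0));	/* prints 0 */
	printf("wake #1 acts: %d\n", queue_wake(0));	/* prints 0 */
	printf("wake #2 acts: %d\n", queue_wake(0));	/* prints 1 */
	return 0;
}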
mq                 24 drivers/scsi/arm/msgqueue.c 	struct msgqueue_entry *mq;
mq                 26 drivers/scsi/arm/msgqueue.c 	if ((mq = msgq->free) != NULL)
mq                 27 drivers/scsi/arm/msgqueue.c 		msgq->free = mq->next;
mq                 29 drivers/scsi/arm/msgqueue.c 	return mq;
mq                 38 drivers/scsi/arm/msgqueue.c static void mqe_free(MsgQueue_t *msgq, struct msgqueue_entry *mq)
mq                 40 drivers/scsi/arm/msgqueue.c 	if (mq) {
mq                 41 drivers/scsi/arm/msgqueue.c 		mq->next = msgq->free;
mq                 42 drivers/scsi/arm/msgqueue.c 		msgq->free = mq;
mq                 82 drivers/scsi/arm/msgqueue.c 	struct msgqueue_entry *mq = msgq->qe;
mq                 85 drivers/scsi/arm/msgqueue.c 	for (mq = msgq->qe; mq; mq = mq->next)
mq                 86 drivers/scsi/arm/msgqueue.c 		length += mq->msg.length;
mq                100 drivers/scsi/arm/msgqueue.c 	struct msgqueue_entry *mq;
mq                102 drivers/scsi/arm/msgqueue.c 	for (mq = msgq->qe; mq && msgno; mq = mq->next, msgno--);
mq                104 drivers/scsi/arm/msgqueue.c 	return mq ? &mq->msg : NULL;
mq                117 drivers/scsi/arm/msgqueue.c 	struct msgqueue_entry *mq = mqe_alloc(msgq);
mq                120 drivers/scsi/arm/msgqueue.c 	if (mq) {
mq                126 drivers/scsi/arm/msgqueue.c 			mq->msg.msg[i] = va_arg(ap, unsigned int);
mq                129 drivers/scsi/arm/msgqueue.c 		mq->msg.length = length;
mq                130 drivers/scsi/arm/msgqueue.c 		mq->msg.fifo = 0;
mq                131 drivers/scsi/arm/msgqueue.c 		mq->next = NULL;
mq                137 drivers/scsi/arm/msgqueue.c 		*mqp = mq;
mq                140 drivers/scsi/arm/msgqueue.c 	return mq != NULL;
mq                150 drivers/scsi/arm/msgqueue.c 	struct msgqueue_entry *mq, *mqnext;
mq                152 drivers/scsi/arm/msgqueue.c 	for (mq = msgq->qe; mq; mq = mqnext) {
mq                153 drivers/scsi/arm/msgqueue.c 		mqnext = mq->next;
mq                154 drivers/scsi/arm/msgqueue.c 		mqe_free(msgq, mq);
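The drivers/scsi/arm/msgqueue.c entries above show a small free-list allocator: mqe_alloc() pops a spare msgqueue_entry off msgq->free, mqe_free() pushes it back, and the flush path walks the queued list returning every entry to the pool. The sketch below reproduces only that free-list mechanic in standalone C; struct msg_queue, the pool size and the helper names are illustrative stand-ins, not the driver's own.

/*
 * Standalone sketch (not the driver itself) of the free-list pattern
 * referenced above: a fixed pool of entries is threaded onto a singly
 * linked "free" list; allocation pops from it and release pushes back,
 * so nothing is allocated on the hot path.
 */
#include <stddef.h>
#include <stdio.h>

struct entry {
	struct entry *next;
	int payload;
};

struct msg_queue {
	struct entry *free;	/* spare entries */
	struct entry pool[8];	/* backing storage (size is arbitrary) */
};

static void queue_init(struct msg_queue *q)
{
	size_t i;

	q->free = NULL;
	for (i = 0; i < sizeof(q->pool) / sizeof(q->pool[0]); i++) {
		q->pool[i].next = q->free;
		q->free = &q->pool[i];
	}
}

/* Pop a spare entry, or NULL when the pool is exhausted. */
static struct entry *entry_alloc(struct msg_queue *q)
{
	struct entry *e = q->free;

	if (e)
		q->free = e->next;
	return e;
}

/* Push an entry back onto the free list for reuse. */
static void entry_free(struct msg_queue *q, struct entry *e)
{
	if (e) {
		e->next = q->free;
		q->free = e;
	}
}

int main(void)
{
	struct msg_queue q;
	struct entry *e;

	queue_init(&q);
	e = entry_alloc(&q);
	e->payload = 42;
	entry_free(&q, e);
	printf("same entry handed out again: %d\n", entry_alloc(&q) == e);
	return 0;
}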
mq               15269 drivers/scsi/lpfc/lpfc_sli.c lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
mq               15283 drivers/scsi/lpfc/lpfc_sli.c 	       mq->page_count);
mq               15287 drivers/scsi/lpfc/lpfc_sli.c 	switch (mq->entry_count) {
mq               15305 drivers/scsi/lpfc/lpfc_sli.c 	list_for_each_entry(dmabuf, &mq->page_list, list) {
mq               15335 drivers/scsi/lpfc/lpfc_sli.c lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
mq               15348 drivers/scsi/lpfc/lpfc_sli.c 	if (!mq || !cq)
mq               15365 drivers/scsi/lpfc/lpfc_sli.c 	       &mq_create_ext->u.request, mq->page_count);
mq               15385 drivers/scsi/lpfc/lpfc_sli.c 	switch (mq->entry_count) {
mq               15389 drivers/scsi/lpfc/lpfc_sli.c 				mq->entry_count);
mq               15390 drivers/scsi/lpfc/lpfc_sli.c 		if (mq->entry_count < 16) {
mq               15416 drivers/scsi/lpfc/lpfc_sli.c 	list_for_each_entry(dmabuf, &mq->page_list, list) {
mq               15424 drivers/scsi/lpfc/lpfc_sli.c 	mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
mq               15431 drivers/scsi/lpfc/lpfc_sli.c 		lpfc_mq_create_fb_init(phba, mq, mbox, cq);
mq               15435 drivers/scsi/lpfc/lpfc_sli.c 		mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
mq               15450 drivers/scsi/lpfc/lpfc_sli.c 	if (mq->queue_id == 0xFFFF) {
mq               15454 drivers/scsi/lpfc/lpfc_sli.c 	mq->type = LPFC_MQ;
mq               15455 drivers/scsi/lpfc/lpfc_sli.c 	mq->assoc_qid = cq->queue_id;
mq               15456 drivers/scsi/lpfc/lpfc_sli.c 	mq->subtype = subtype;
mq               15457 drivers/scsi/lpfc/lpfc_sli.c 	mq->host_index = 0;
mq               15458 drivers/scsi/lpfc/lpfc_sli.c 	mq->hba_index = 0;
mq               15461 drivers/scsi/lpfc/lpfc_sli.c 	list_add_tail(&mq->list, &cq->child_list);
mq               16339 drivers/scsi/lpfc/lpfc_sli.c lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
mq               16347 drivers/scsi/lpfc/lpfc_sli.c 	if (!mq)
mq               16349 drivers/scsi/lpfc/lpfc_sli.c 	mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
mq               16358 drivers/scsi/lpfc/lpfc_sli.c 	       mq->queue_id);
mq               16359 drivers/scsi/lpfc/lpfc_sli.c 	mbox->vport = mq->phba->pport;
mq               16361 drivers/scsi/lpfc/lpfc_sli.c 	rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
mq               16375 drivers/scsi/lpfc/lpfc_sli.c 	list_del_init(&mq->list);
mq               16376 drivers/scsi/lpfc/lpfc_sli.c 	mempool_free(mbox, mq->phba->mbox_mem_pool);
mq                667 drivers/scsi/qla2xxx/qla_dbg.c 	struct qla2xxx_mq_chain *mq = ptr;
mq                674 drivers/scsi/qla2xxx/qla_dbg.c 	mq = ptr;
mq                675 drivers/scsi/qla2xxx/qla_dbg.c 	*last_chain = &mq->type;
mq                676 drivers/scsi/qla2xxx/qla_dbg.c 	mq->type = htonl(DUMP_CHAIN_MQ);
mq                677 drivers/scsi/qla2xxx/qla_dbg.c 	mq->chain_size = htonl(sizeof(struct qla2xxx_mq_chain));
mq                681 drivers/scsi/qla2xxx/qla_dbg.c 	mq->count = htonl(que_cnt);
mq                685 drivers/scsi/qla2xxx/qla_dbg.c 		mq->qregs[que_idx] =
mq                687 drivers/scsi/qla2xxx/qla_dbg.c 		mq->qregs[que_idx+1] =
mq                689 drivers/scsi/qla2xxx/qla_dbg.c 		mq->qregs[que_idx+2] =
mq                691 drivers/scsi/qla2xxx/qla_dbg.c 		mq->qregs[que_idx+3] =
mq                122 drivers/sh/maple/maple.c 			void (*callback) (struct mapleq *mq),
mq                140 drivers/sh/maple/maple.c 	struct mapleq *mq;
mq                143 drivers/sh/maple/maple.c 	mq = mdev->mq;
mq                144 drivers/sh/maple/maple.c 	kmem_cache_free(maple_queue_cache, mq->recvbuf);
mq                145 drivers/sh/maple/maple.c 	kfree(mq);
mq                172 drivers/sh/maple/maple.c 	mdev->mq->command = command;
mq                173 drivers/sh/maple/maple.c 	mdev->mq->length = length;
mq                176 drivers/sh/maple/maple.c 	mdev->mq->sendbuf = sendbuf;
mq                179 drivers/sh/maple/maple.c 	list_add_tail(&mdev->mq->list, &maple_waitq);
mq                188 drivers/sh/maple/maple.c 	struct mapleq *mq;
mq                190 drivers/sh/maple/maple.c 	mq = kzalloc(sizeof(*mq), GFP_KERNEL);
mq                191 drivers/sh/maple/maple.c 	if (!mq)
mq                194 drivers/sh/maple/maple.c 	INIT_LIST_HEAD(&mq->list);
mq                195 drivers/sh/maple/maple.c 	mq->dev = mdev;
mq                196 drivers/sh/maple/maple.c 	mq->recvbuf = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL);
mq                197 drivers/sh/maple/maple.c 	if (!mq->recvbuf)
mq                199 drivers/sh/maple/maple.c 	mq->recvbuf->buf = &((mq->recvbuf->bufx)[0]);
mq                201 drivers/sh/maple/maple.c 	return mq;
mq                204 drivers/sh/maple/maple.c 	kfree(mq);
mq                225 drivers/sh/maple/maple.c 	mdev->mq = maple_allocq(mdev);
mq                227 drivers/sh/maple/maple.c 	if (!mdev->mq) {
mq                239 drivers/sh/maple/maple.c 	kmem_cache_free(maple_queue_cache, mdev->mq->recvbuf);
mq                240 drivers/sh/maple/maple.c 	kfree(mdev->mq);
mq                247 drivers/sh/maple/maple.c static void maple_build_block(struct mapleq *mq)
mq                250 drivers/sh/maple/maple.c 	unsigned long *lsendbuf = mq->sendbuf;
mq                252 drivers/sh/maple/maple.c 	port = mq->dev->port & 3;
mq                253 drivers/sh/maple/maple.c 	unit = mq->dev->unit;
mq                254 drivers/sh/maple/maple.c 	len = mq->length;
mq                262 drivers/sh/maple/maple.c 	*maple_sendptr++ = virt_to_phys(mq->recvbuf->buf);
mq                264 drivers/sh/maple/maple.c 	    mq->command | (to << 8) | (from << 16) | (len << 24);
mq                273 drivers/sh/maple/maple.c 	struct mapleq *mq, *nmq;
mq                293 drivers/sh/maple/maple.c 	list_for_each_entry_safe(mq, nmq, &maple_waitq, list) {
mq                294 drivers/sh/maple/maple.c 		maple_build_block(mq);
mq                295 drivers/sh/maple/maple.c 		list_del_init(&mq->list);
mq                296 drivers/sh/maple/maple.c 		list_add_tail(&mq->list, &maple_sentq);
mq                337 drivers/sh/maple/maple.c 	recvbuf = mdev->mq->recvbuf->buf;
mq                633 drivers/sh/maple/maple.c 	struct mapleq *mq, *nmq;
mq                642 drivers/sh/maple/maple.c 		list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
mq                643 drivers/sh/maple/maple.c 			mdev = mq->dev;
mq                644 drivers/sh/maple/maple.c 			recvbuf = mq->recvbuf->buf;
mq                648 drivers/sh/maple/maple.c 			kfree(mq->sendbuf);
mq                649 drivers/sh/maple/maple.c 			list_del_init(&mq->list);
mq                662 drivers/sh/maple/maple.c 					mdev->callback(mq);
mq                 70 include/linux/maple.h 	struct mapleq *mq;
mq                 71 include/linux/maple.h 	void (*callback) (struct mapleq * mq);
mq                 90 include/linux/maple.h 			    void (*callback) (struct mapleq * mq),
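Finally, the drivers/sh/maple entries show how maple_build_block() packs a transfer header as mq->command | (to << 8) | (from << 16) | (len << 24) before writing it into the DMA buffer. A minimal standalone sketch of just that packing step follows; pack_header() and the example values are hypothetical, not part of the Maple bus API.

/*
 * Standalone sketch (not the Maple bus driver) of the header packing
 * referenced above: command, destination, source and length share one
 * 32-bit word, each in its own byte.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_header(uint8_t command, uint8_t to, uint8_t from,
			    uint8_t len)
{
	return (uint32_t)command |
	       ((uint32_t)to << 8) |
	       ((uint32_t)from << 16) |
	       ((uint32_t)len << 24);
}

int main(void)
{
	/* Hypothetical field values, only to show the layout. */
	uint32_t word = pack_header(0x01, 0x20, 0x00, 0x00);

	printf("header word: 0x%08" PRIx32 "\n", word);
	return 0;
}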