Searched refs:mq (Results 1 – 46 of 46) sorted by relevance

/linux-4.4.14/drivers/md/
dm-cache-policy-mq.c 495 static void hash_insert(struct mq_policy *mq, struct entry *e) in hash_insert() argument
497 unsigned h = hash_64(from_oblock(e->oblock), mq->hash_bits); in hash_insert()
499 hlist_add_head(&e->hlist, mq->table + h); in hash_insert()
502 static struct entry *hash_lookup(struct mq_policy *mq, dm_oblock_t oblock) in hash_lookup() argument
504 unsigned h = hash_64(from_oblock(oblock), mq->hash_bits); in hash_lookup()
505 struct hlist_head *bucket = mq->table + h; in hash_lookup()
525 static bool any_free_cblocks(struct mq_policy *mq) in any_free_cblocks() argument
527 return !epool_empty(&mq->cache_pool); in any_free_cblocks()
530 static bool any_clean_cblocks(struct mq_policy *mq) in any_clean_cblocks() argument
532 return !queue_empty(&mq->cache_clean); in any_clean_cblocks()
[all …]
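
The hash_insert()/hash_lookup() pair above is a plain hash-bucket scheme: hash_64() reduces the origin block number to a bucket index and entries are chained off the bucket head. Below is a minimal standalone sketch of the same pattern, assuming simplified types and a stand-in hash function instead of the kernel's hlist and hash_64() helpers; it is illustrative only, not the policy's code.

    #include <stddef.h>
    #include <stdint.h>

    #define HASH_BITS   10
    #define NR_BUCKETS  (1u << HASH_BITS)

    struct entry {
        uint64_t oblock;            /* key: origin block number */
        struct entry *hnext;        /* bucket chain, stands in for hlist_node */
    };

    static struct entry *table[NR_BUCKETS];

    /* Stand-in for hash_64(): golden-ratio multiply, keep the top HASH_BITS bits. */
    static unsigned hash_oblock(uint64_t oblock)
    {
        return (unsigned)((oblock * 0x9e3779b97f4a7c15ULL) >> (64 - HASH_BITS));
    }

    static void hash_insert(struct entry *e)
    {
        unsigned h = hash_oblock(e->oblock);

        e->hnext = table[h];        /* push on the bucket head, like hlist_add_head() */
        table[h] = e;
    }

    static struct entry *hash_lookup(uint64_t oblock)
    {
        unsigned h = hash_oblock(oblock);
        struct entry *e;

        for (e = table[h]; e; e = e->hnext)
            if (e->oblock == oblock)
                return e;
        return NULL;
    }
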
dm-cache-policy-smq.c 840 static struct entry *writeback_sentinel(struct smq_policy *mq, unsigned level) in writeback_sentinel() argument
842 return get_sentinel(&mq->writeback_sentinel_alloc, level, mq->current_writeback_sentinels); in writeback_sentinel()
845 static struct entry *demote_sentinel(struct smq_policy *mq, unsigned level) in demote_sentinel() argument
847 return get_sentinel(&mq->demote_sentinel_alloc, level, mq->current_demote_sentinels); in demote_sentinel()
850 static void __update_writeback_sentinels(struct smq_policy *mq) in __update_writeback_sentinels() argument
853 struct queue *q = &mq->dirty; in __update_writeback_sentinels()
857 sentinel = writeback_sentinel(mq, level); in __update_writeback_sentinels()
863 static void __update_demote_sentinels(struct smq_policy *mq) in __update_demote_sentinels() argument
866 struct queue *q = &mq->clean; in __update_demote_sentinels()
870 sentinel = demote_sentinel(mq, level); in __update_demote_sentinels()
[all …]
Makefile 15 dm-cache-mq-y += dm-cache-policy-mq.o
57 obj-$(CONFIG_DM_CACHE_MQ) += dm-cache-mq.o
Kconfig 217 bool "request-based DM: use blk-mq I/O path by default"
220 This option enables the blk-mq based I/O path for request-based
/linux-4.4.14/drivers/scsi/arm/
msgqueue.c 27 struct msgqueue_entry *mq; in mqe_alloc() local
29 if ((mq = msgq->free) != NULL) in mqe_alloc()
30 msgq->free = mq->next; in mqe_alloc()
32 return mq; in mqe_alloc()
41 static void mqe_free(MsgQueue_t *msgq, struct msgqueue_entry *mq) in mqe_free() argument
43 if (mq) { in mqe_free()
44 mq->next = msgq->free; in mqe_free()
45 msgq->free = mq; in mqe_free()
85 struct msgqueue_entry *mq = msgq->qe; in msgqueue_msglength() local
88 for (mq = msgq->qe; mq; mq = mq->next) in msgqueue_msglength()
[all …]
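
mqe_alloc() and mqe_free() above implement an intrusive free list: entries are preallocated and threaded through their next pointers, so allocation and release are constant-time pointer swaps with no heap calls in the hot path. A hedged standalone sketch of that pattern follows; the pool size and the init helper are illustrative, not the driver's.

    #include <stddef.h>

    struct msgqueue_entry {
        struct msgqueue_entry *next;
        /* message payload would live here */
    };

    struct msgqueue {
        struct msgqueue_entry *free;        /* head of the free list */
        struct msgqueue_entry pool[8];      /* preallocated entries */
    };

    /* Thread every pool entry onto the free list. */
    static void msgqueue_init(struct msgqueue *msgq)
    {
        size_t i;

        msgq->free = NULL;
        for (i = 0; i < sizeof(msgq->pool) / sizeof(msgq->pool[0]); i++) {
            msgq->pool[i].next = msgq->free;
            msgq->free = &msgq->pool[i];
        }
    }

    /* Pop one entry off the free list, or return NULL if the pool is exhausted. */
    static struct msgqueue_entry *mqe_alloc(struct msgqueue *msgq)
    {
        struct msgqueue_entry *mq = msgq->free;

        if (mq)
            msgq->free = mq->next;
        return mq;
    }

    /* Push an entry back onto the free list. */
    static void mqe_free(struct msgqueue *msgq, struct msgqueue_entry *mq)
    {
        if (mq) {
            mq->next = msgq->free;
            msgq->free = mq;
        }
    }
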
/linux-4.4.14/drivers/mmc/card/
queue.c 31 struct mmc_queue *mq = q->queuedata; in mmc_prep_request() local
41 if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq))) in mmc_prep_request()
51 struct mmc_queue *mq = d; in mmc_queue_thread() local
52 struct request_queue *q = mq->queue; in mmc_queue_thread()
56 down(&mq->thread_sem); in mmc_queue_thread()
64 mq->mqrq_cur->req = req; in mmc_queue_thread()
67 if (req || mq->mqrq_prev->req) { in mmc_queue_thread()
70 mq->issue_fn(mq, req); in mmc_queue_thread()
72 if (mq->flags & MMC_QUEUE_NEW_REQUEST) { in mmc_queue_thread()
73 mq->flags &= ~MMC_QUEUE_NEW_REQUEST; in mmc_queue_thread()
[all …]
block.c 1134 int mmc_access_rpmb(struct mmc_queue *mq) in mmc_access_rpmb() argument
1136 struct mmc_blk_data *md = mq->data; in mmc_access_rpmb()
1146 static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) in mmc_blk_issue_discard_rq() argument
1148 struct mmc_blk_data *md = mq->data; in mmc_blk_issue_discard_rq()
1189 static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, in mmc_blk_issue_secdiscard_rq() argument
1192 struct mmc_blk_data *md = mq->data; in mmc_blk_issue_secdiscard_rq()
1256 static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) in mmc_blk_issue_flush() argument
1258 struct mmc_blk_data *md = mq->data; in mmc_blk_issue_flush()
1467 struct mmc_queue *mq) in mmc_blk_rw_rq_prep() argument
1472 struct mmc_blk_data *md = mq->data; in mmc_blk_rw_rq_prep()
[all …]
/linux-4.4.14/drivers/staging/rdma/amso1100/
c2_cq.c 92 q = &cq->mq; in c2_cq_clean()
138 ce = c2_mq_consume(&cq->mq); in c2_poll_one()
150 c2_mq_free(&cq->mq); in c2_poll_one()
151 ce = c2_mq_consume(&cq->mq); in c2_poll_one()
196 c2_mq_free(&cq->mq); in c2_poll_one()
230 shared = cq->mq.peer; in c2_arm_cq()
250 ret = !c2_mq_empty(&cq->mq); in c2_arm_cq()
257 static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq) in c2_free_cq_buf() argument
259 dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size, in c2_free_cq_buf()
260 mq->msg_pool.host, dma_unmap_addr(mq, mapping)); in c2_free_cq_buf()
[all …]
c2_ae.c 149 struct c2_mq *mq = c2dev->qptr_array[mq_index]; in c2_ae_event() local
164 wr = c2_mq_consume(mq); in c2_ae_event()
326 c2_mq_free(mq); in c2_ae_event()
c2_provider.h 100 struct c2_mq mq; member
/linux-4.4.14/drivers/sh/maple/
maple.c 122 void (*callback) (struct mapleq *mq), in maple_getcond_callback()
140 struct mapleq *mq; in maple_release_device() local
143 mq = mdev->mq; in maple_release_device()
144 kmem_cache_free(maple_queue_cache, mq->recvbuf); in maple_release_device()
145 kfree(mq); in maple_release_device()
172 mdev->mq->command = command; in maple_add_packet()
173 mdev->mq->length = length; in maple_add_packet()
176 mdev->mq->sendbuf = sendbuf; in maple_add_packet()
179 list_add_tail(&mdev->mq->list, &maple_waitq); in maple_add_packet()
188 struct mapleq *mq; in maple_allocq() local
[all …]
/linux-4.4.14/drivers/isdn/capi/
capilib.c 47 struct capilib_msgidqueue *mq; in mq_enqueue() local
48 if ((mq = np->msgidfree) == NULL) in mq_enqueue()
50 np->msgidfree = mq->next; in mq_enqueue()
51 mq->msgid = msgid; in mq_enqueue()
52 mq->next = NULL; in mq_enqueue()
54 np->msgidlast->next = mq; in mq_enqueue()
55 np->msgidlast = mq; in mq_enqueue()
57 np->msgidqueue = mq; in mq_enqueue()
67 struct capilib_msgidqueue *mq = *pp; in mq_dequeue() local
68 *pp = mq->next; in mq_dequeue()
[all …]
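
mq_enqueue()/mq_dequeue() above keep the in-flight message-id list as a singly linked FIFO with head (msgidqueue) and tail (msgidlast) pointers: enqueue appends at the tail in O(1), dequeue searches for the matching msgid and unlinks it. A simplified sketch of that bookkeeping follows, with hypothetical type and field names.

    #include <stddef.h>

    struct msgid_node {
        unsigned msgid;
        struct msgid_node *next;
    };

    struct msgid_queue {
        struct msgid_node *head;    /* oldest entry (msgidqueue in the snippet) */
        struct msgid_node *tail;    /* newest entry (msgidlast) */
    };

    /* Append at the tail in O(1). */
    static void msgid_enqueue(struct msgid_queue *q, struct msgid_node *n, unsigned msgid)
    {
        n->msgid = msgid;
        n->next = NULL;
        if (q->tail)
            q->tail->next = n;
        else
            q->head = n;
        q->tail = n;
    }

    /* Unlink and return the entry carrying msgid, or NULL if it is not queued. */
    static struct msgid_node *msgid_dequeue(struct msgid_queue *q, unsigned msgid)
    {
        struct msgid_node *prev = NULL, *n;

        for (n = q->head; n; prev = n, n = n->next) {
            if (n->msgid != msgid)
                continue;
            if (prev)
                prev->next = n->next;
            else
                q->head = n->next;
            if (q->tail == n)       /* removed the newest entry: tail steps back */
                q->tail = prev;
            return n;
        }
        return NULL;
    }
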
/linux-4.4.14/drivers/misc/sgi-xp/
xpc_uv.c 109 xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name) in xpc_get_gru_mq_irq_uv() argument
111 int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); in xpc_get_gru_mq_irq_uv()
114 mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset, in xpc_get_gru_mq_irq_uv()
116 if (mq->irq < 0) in xpc_get_gru_mq_irq_uv()
117 return mq->irq; in xpc_get_gru_mq_irq_uv()
119 mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset); in xpc_get_gru_mq_irq_uv()
123 mq->irq = SGI_XPC_ACTIVATE; in xpc_get_gru_mq_irq_uv()
125 mq->irq = SGI_XPC_NOTIFY; in xpc_get_gru_mq_irq_uv()
129 mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq; in xpc_get_gru_mq_irq_uv()
130 uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value); in xpc_get_gru_mq_irq_uv()
[all …]
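
The last lines above pack the target CPU's physical id and the interrupt vector into a single 64-bit MMR value (cpu_physical_id(cpu) << 32 | mq->irq). A tiny sketch of that field-packing idiom; the 32/32 split mirrors the snippet, but the real register layout is hardware-defined and the helper names are hypothetical.

    #include <stdint.h>

    /* Pack destination CPU id (upper 32 bits) and IRQ vector (lower 32 bits). */
    static uint64_t pack_mmr_value(uint32_t cpu_phys_id, uint32_t irq)
    {
        return ((uint64_t)cpu_phys_id << 32) | irq;
    }

    /* Unpack for inspection or debugging. */
    static uint32_t mmr_cpu(uint64_t v) { return (uint32_t)(v >> 32); }
    static uint32_t mmr_irq(uint64_t v) { return (uint32_t)(v & 0xffffffffu); }
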
/linux-4.4.14/drivers/misc/sgi-gru/
grukservices.c 146 #define HSTATUS(mq, h) ((mq) + offsetof(struct message_queue, hstatus[h])) argument
560 struct message_queue *mq = p; in gru_create_message_queue() local
564 memset(mq, 0, bytes); in gru_create_message_queue()
565 mq->start = &mq->data; in gru_create_message_queue()
566 mq->start2 = &mq->data + (qlines / 2 - 1) * GRU_CACHE_LINE_BYTES; in gru_create_message_queue()
567 mq->next = &mq->data; in gru_create_message_queue()
568 mq->limit = &mq->data + (qlines - 2) * GRU_CACHE_LINE_BYTES; in gru_create_message_queue()
569 mq->qlines = qlines; in gru_create_message_queue()
570 mq->hstatus[0] = 0; in gru_create_message_queue()
571 mq->hstatus[1] = 1; in gru_create_message_queue()
[all …]
grukservices.h 45 void *mq; /* message queue vaddress */ member
/linux-4.4.14/block/
Makefile 8 blk-iopoll.o blk-lib.o blk-mq.o blk-mq-tag.o \
9 blk-mq-sysfs.o blk-mq-cpu.o blk-mq-cpumap.o ioctl.o \
/linux-4.4.14/drivers/mailbox/
omap-mailbox.c 309 struct omap_mbox_queue *mq = in mbox_rx_work() local
314 while (kfifo_len(&mq->fifo) >= sizeof(msg)) { in mbox_rx_work()
315 len = kfifo_out(&mq->fifo, (unsigned char *)&msg, sizeof(msg)); in mbox_rx_work()
318 mbox_chan_received_data(mq->mbox->chan, (void *)msg); in mbox_rx_work()
319 spin_lock_irq(&mq->lock); in mbox_rx_work()
320 if (mq->full) { in mbox_rx_work()
321 mq->full = false; in mbox_rx_work()
322 _omap_mbox_enable_irq(mq->mbox, IRQ_RX); in mbox_rx_work()
324 spin_unlock_irq(&mq->lock); in mbox_rx_work()
340 struct omap_mbox_queue *mq = mbox->rxq; in __mbox_rx_interrupt() local
[all …]
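
mbox_rx_work() above drains a kfifo of fixed-size messages in bottom-half context and re-enables the RX interrupt once the backlog that forced it off has cleared. A simplified single-threaded sketch of that drain loop follows; field and helper names are assumptions, and the real driver protects the full flag with a spinlock.

    #include <stdbool.h>
    #include <stdint.h>

    #define FIFO_SLOTS 64u          /* power of two so the counters can wrap with a mask */

    struct rx_queue {
        uint32_t buf[FIFO_SLOTS];
        unsigned in, out;           /* free-running producer/consumer counters */
        bool full;                  /* set by the IRQ handler when it had to mask RX */
    };

    static unsigned fifo_len(const struct rx_queue *q)
    {
        return q->in - q->out;
    }

    static uint32_t fifo_out(struct rx_queue *q)
    {
        return q->buf[q->out++ & (FIFO_SLOTS - 1)];
    }

    /* Placeholders for mbox_chan_received_data() and _omap_mbox_enable_irq(). */
    static void deliver_msg(uint32_t msg) { (void)msg; }
    static void enable_rx_irq(void) { }

    /* Drain everything the IRQ handler queued, then unmask RX if it had stalled. */
    static void rx_work(struct rx_queue *q)
    {
        while (fifo_len(q) >= 1) {
            deliver_msg(fifo_out(q));

            if (q->full) {
                q->full = false;
                enable_rx_irq();
            }
        }
    }
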
/linux-4.4.14/include/linux/
maple.h 69 struct mapleq *mq; member
70 void (*callback) (struct mapleq * mq);
89 void (*callback) (struct mapleq * mq),
/linux-4.4.14/drivers/mtd/maps/
vmu-flash.c 91 static void vmu_blockread(struct mapleq *mq) in vmu_blockread() argument
96 mdev = mq->dev; in vmu_blockread()
103 memcpy(card->blockread, mq->recvbuf->buf + 12, in vmu_blockread()
193 list_del_init(&(mdev->mq->list)); in maple_vmu_read_block()
194 kfree(mdev->mq->sendbuf); in maple_vmu_read_block()
195 mdev->mq->sendbuf = NULL; in maple_vmu_read_block()
285 kfree(mdev->mq->sendbuf); in maple_vmu_write_block()
286 mdev->mq->sendbuf = NULL; in maple_vmu_write_block()
287 list_del_init(&(mdev->mq->list)); in maple_vmu_write_block()
501 static void vmu_queryblocks(struct mapleq *mq) in vmu_queryblocks() argument
[all …]
/linux-4.4.14/drivers/input/mouse/
maplemouse.c 26 static void dc_mouse_callback(struct mapleq *mq) in dc_mouse_callback() argument
29 struct maple_device *mapledev = mq->dev; in dc_mouse_callback()
32 unsigned char *res = mq->recvbuf->buf; in dc_mouse_callback()
/linux-4.4.14/Documentation/device-mapper/
cache-policies.txt 28 multiqueue (mq)
67 Internally the mq policy determines a promotion threshold. If the hit
82 with the multiqueue (mq) policy.
84 The smq policy (vs mq) offers the promise of less memory utilization,
88 Users may switch from "mq" to "smq" simply by appropriately reloading a
90 mq policy's hints to be dropped. Also, performance of the cache may
95 The mq policy uses a lot of memory; 88 bytes per cache block on a 64
156 /dev/sdd 512 0 mq 4 sequential_threshold 1024 random_threshold 8"
cache.txt 146 For instance, the 'mq' policy, which is currently the default policy,
309 mq 4 sequential_threshold 1024 random_threshold 8'
/linux-4.4.14/Documentation/devicetree/bindings/powerpc/4xx/
ppc440spe-adma.txt 84 - compatible : "ibm,mq-440spe";
89 MQ0: mq {
90 compatible = "ibm,mq-440spe";
/linux-4.4.14/drivers/net/wireless/iwlwifi/dvm/
main.c 1952 int mq = priv->queue_to_mac80211[queue]; in iwl_stop_sw_queue() local
1954 if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE)) in iwl_stop_sw_queue()
1957 if (atomic_inc_return(&priv->queue_stop_count[mq]) > 1) { in iwl_stop_sw_queue()
1960 queue, mq); in iwl_stop_sw_queue()
1964 set_bit(mq, &priv->transport_queue_stop); in iwl_stop_sw_queue()
1965 ieee80211_stop_queue(priv->hw, mq); in iwl_stop_sw_queue()
1971 int mq = priv->queue_to_mac80211[queue]; in iwl_wake_sw_queue() local
1973 if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE)) in iwl_wake_sw_queue()
1976 if (atomic_dec_return(&priv->queue_stop_count[mq]) > 0) { in iwl_wake_sw_queue()
1979 queue, mq); in iwl_wake_sw_queue()
[all …]
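
iwl_stop_sw_queue()/iwl_wake_sw_queue() above keep a per-queue stop count so nested stop/wake calls balance out: only the 0 -> 1 transition stops the mac80211 queue and only the 1 -> 0 transition wakes it again. A sketch of that counting pattern using C11 atomics; hw_stop_queue()/hw_wake_queue() are placeholders for the ieee80211_stop_queue()/ieee80211_wake_queue() calls in the snippet.

    #include <stdatomic.h>
    #include <stdio.h>

    #define NR_QUEUES 4

    static atomic_int queue_stop_count[NR_QUEUES];

    static void hw_stop_queue(int q) { printf("queue %d stopped\n", q); }   /* placeholder */
    static void hw_wake_queue(int q) { printf("queue %d woken\n", q); }     /* placeholder */

    static void stop_sw_queue(int q)
    {
        /* atomic_fetch_add() returns the old value: 0 means we are the first stopper. */
        if (atomic_fetch_add(&queue_stop_count[q], 1) == 0)
            hw_stop_queue(q);
    }

    static void wake_sw_queue(int q)
    {
        /* Old value 1 means this wake drops the count to zero, so the queue may run. */
        if (atomic_fetch_sub(&queue_stop_count[q], 1) == 1)
            hw_wake_queue(q);
    }
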
tx.c 483 static int iwlagn_alloc_agg_txq(struct iwl_priv *priv, int mq) in iwlagn_alloc_agg_txq() argument
490 priv->queue_to_mac80211[q] = mq; in iwlagn_alloc_agg_txq()
/linux-4.4.14/drivers/input/joystick/
maplecontrol.c 26 static void dc_pad_callback(struct mapleq *mq) in dc_pad_callback() argument
29 struct maple_device *mapledev = mq->dev; in dc_pad_callback()
32 unsigned char *res = mq->recvbuf->buf; in dc_pad_callback()
/linux-4.4.14/drivers/infiniband/hw/ocrdma/
ocrdma_hw.c 123 (dev->mq.cq.va + (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe))); in ocrdma_get_mcqe()
132 dev->mq.cq.tail = (dev->mq.cq.tail + 1) & (OCRDMA_MQ_CQ_LEN - 1); in ocrdma_mcq_inc_tail()
137 return dev->mq.sq.va + (dev->mq.sq.head * sizeof(struct ocrdma_mqe)); in ocrdma_get_mqe()
142 dev->mq.sq.head = (dev->mq.sq.head + 1) & (OCRDMA_MQ_LEN - 1); in ocrdma_mq_inc_head()
147 return dev->mq.sq.va + (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe)); in ocrdma_get_mqe_rsp()
321 val |= dev->mq.sq.id & OCRDMA_MQ_ID_MASK; in ocrdma_ring_mq_db()
562 struct ocrdma_queue_info *mq, in ocrdma_mbx_create_mq() argument
571 num_pages = PAGES_4K_SPANNED(mq->va, mq->size); in ocrdma_mbx_create_mq()
586 cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) << in ocrdma_mbx_create_mq()
591 ocrdma_build_q_pages(pa, num_pages, mq->dma, PAGE_SIZE_4K); in ocrdma_mbx_create_mq()
[all …]
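
ocrdma_mcq_inc_tail() and ocrdma_mq_inc_head() above advance their ring indices with (index + 1) & (length - 1), which wraps correctly only because OCRDMA_MQ_CQ_LEN and OCRDMA_MQ_LEN are powers of two. A minimal sketch of that wrap-around idiom with assumed names:

    #define RING_LEN 16u            /* must be a power of two for the mask to act as a modulo */

    struct ring {
        unsigned head;              /* producer index */
        unsigned tail;              /* consumer index */
        void *slot[RING_LEN];
    };

    static void ring_inc_head(struct ring *r)
    {
        r->head = (r->head + 1) & (RING_LEN - 1);   /* 15 wraps to 0 without a division */
    }

    static void ring_inc_tail(struct ring *r)
    {
        r->tail = (r->tail + 1) & (RING_LEN - 1);
    }
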
ocrdma.h 278 struct ocrdma_mq mq; member
/linux-4.4.14/Documentation/ABI/testing/
sysfs-block-dm 45 Description: Request-based Device-mapper blk-mq I/O path mode.
46 Contains the value 1 if the device is using blk-mq.
/linux-4.4.14/drivers/input/keyboard/
maple_keyb.c 139 static void dc_kbd_callback(struct mapleq *mq) in dc_kbd_callback() argument
141 struct maple_device *mapledev = mq->dev; in dc_kbd_callback()
143 unsigned long *buf = (unsigned long *)(mq->recvbuf->buf); in dc_kbd_callback()
/linux-4.4.14/arch/powerpc/kernel/
ppc32.h 28 unsigned int mq; /* 601 only (not used at present) */ member
process.c 1257 regs->mq = 0; in start_thread()
asm-offsets.c 327 DEFINE(_MQ, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, mq)); in main()
ptrace.c 108 REG_OFFSET_NAME(mq),
/linux-4.4.14/drivers/net/wireless/iwlwifi/mvm/
ops.c 810 unsigned long mq; in iwl_mvm_stop_sw_queue() local
814 mq = mvm->queue_info[queue].hw_queue_to_mac80211; in iwl_mvm_stop_sw_queue()
817 if (WARN_ON_ONCE(!mq)) in iwl_mvm_stop_sw_queue()
820 for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) { in iwl_mvm_stop_sw_queue()
835 unsigned long mq; in iwl_mvm_wake_sw_queue() local
839 mq = mvm->queue_info[queue].hw_queue_to_mac80211; in iwl_mvm_wake_sw_queue()
842 if (WARN_ON_ONCE(!mq)) in iwl_mvm_wake_sw_queue()
845 for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) { in iwl_mvm_wake_sw_queue()
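
Unlike the dvm driver above, the mvm code maps one hardware queue to a bitmap of mac80211 queues and walks it with for_each_set_bit(). Below is a small standalone sketch of that bitmap walk; it tests each bit in turn rather than skipping zero runs the way find_next_bit() does, and wake_one_queue() is a placeholder.

    #include <stdio.h>

    #define MAX_QUEUES (8 * sizeof(unsigned long))

    static void wake_one_queue(unsigned q) { printf("wake mac80211 queue %u\n", q); }  /* placeholder */

    /* Visit every set bit in the queue mapping, lowest index first. */
    static void wake_mapped_queues(unsigned long mq)
    {
        for (unsigned q = 0; q < MAX_QUEUES && mq; q++, mq >>= 1)
            if (mq & 1)
                wake_one_queue(q);
    }
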
/linux-4.4.14/arch/powerpc/include/uapi/asm/
ptrace.h 43 unsigned long mq; /* 601 only (not used at present) */ member
/linux-4.4.14/Documentation/block/
null_blk.txt 75 Register device with LightNVM. Requires blk-mq to be used.
/linux-4.4.14/drivers/scsi/qla2xxx/
qla_dbg.c 629 struct qla2xxx_mq_chain *mq = ptr; in qla25xx_copy_mq() local
635 mq = ptr; in qla25xx_copy_mq()
636 *last_chain = &mq->type; in qla25xx_copy_mq()
637 mq->type = htonl(DUMP_CHAIN_MQ); in qla25xx_copy_mq()
638 mq->chain_size = htonl(sizeof(struct qla2xxx_mq_chain)); in qla25xx_copy_mq()
642 mq->count = htonl(que_cnt); in qla25xx_copy_mq()
646 mq->qregs[que_idx] = in qla25xx_copy_mq()
648 mq->qregs[que_idx+1] = in qla25xx_copy_mq()
650 mq->qregs[que_idx+2] = in qla25xx_copy_mq()
652 mq->qregs[que_idx+3] = in qla25xx_copy_mq()
/linux-4.4.14/drivers/scsi/
scsi_lib.c 585 static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq) in scsi_free_sgtable() argument
587 if (mq && sdb->table.orig_nents <= SCSI_MAX_SG_SEGMENTS) in scsi_free_sgtable()
589 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free); in scsi_free_sgtable()
592 static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq) in scsi_alloc_sgtable() argument
599 if (mq) { in scsi_alloc_sgtable()
611 scsi_free_sgtable(sdb, mq); in scsi_alloc_sgtable()
Kconfig 49 bool "SCSI: use blk-mq I/O path by default"
52 This option enables the new blk-mq based I/O path for SCSI
719 This is equivalent to the "eata=mq:8" boot option.
1438 This is equivalent to the "u14-34f=mq:8" boot option.
/linux-4.4.14/arch/ia64/include/asm/sn/
sn_sal.h 1201 sn_mq_watchlist_alloc(int blade, void *mq, unsigned int mq_size, in sn_mq_watchlist_alloc() argument
1209 addr = (unsigned long)mq; in sn_mq_watchlist_alloc()
/linux-4.4.14/arch/powerpc/boot/dts/
icon.dts 108 MQ0: mq {
109 compatible = "ibm,mq-440spe";
katmai.dts 112 MQ0: mq {
113 compatible = "ibm,mq-440spe";
/linux-4.4.14/Documentation/DocBook/
kernel-api.xml.db 573 API---audit-mq-open
574 API---audit-mq-sendrecv
575 API---audit-mq-notify
576 API---audit-mq-getsetattr
/linux-4.4.14/drivers/scsi/lpfc/
lpfc_sli.c 13190 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq, in lpfc_mq_create_fb_init() argument
13204 mq->page_count); in lpfc_mq_create_fb_init()
13208 switch (mq->entry_count) { in lpfc_mq_create_fb_init()
13226 list_for_each_entry(dmabuf, &mq->page_list, list) { in lpfc_mq_create_fb_init()
13256 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, in lpfc_mq_create() argument
13269 if (!mq || !cq) in lpfc_mq_create()
13286 &mq_create_ext->u.request, mq->page_count); in lpfc_mq_create()
13306 switch (mq->entry_count) { in lpfc_mq_create()
13310 mq->entry_count); in lpfc_mq_create()
13311 if (mq->entry_count < 16) { in lpfc_mq_create()
[all …]
/linux-4.4.14/Documentation/sysctl/
Dnet.txt63 interfaces still use mq as root qdisc, which in turn uses this default for its